

Java Result.getColumnLatestCell Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.Result.getColumnLatestCell. If you are unsure what Result.getColumnLatestCell does, how to call it, or what idiomatic usage looks like, the curated examples below should help. You can also explore further usage examples of the containing class, org.apache.hadoop.hbase.client.Result.


The following presents 15 code examples of Result.getColumnLatestCell, sorted by popularity by default.
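Before the collected examples, here is a minimal, self-contained sketch of the pattern they all share: fetch a Result with a Get, ask getColumnLatestCell for the most recent Cell of a column family/qualifier pair, null-check it, and read the value. The table name, row key, family, and qualifier below are hypothetical placeholders, and the sketch assumes an HBase 1.x-or-later client on the classpath; it is not taken from any of the projects cited below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class GetColumnLatestCellSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // "demo_table", "row-1", "cf" and "q" are made-up names for illustration only.
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("demo_table"))) {
      Result result = table.get(new Get(Bytes.toBytes("row-1")));
      // Latest Cell for the family/qualifier, or null if the Result has no such column.
      Cell cell = result.getColumnLatestCell(Bytes.toBytes("cf"), Bytes.toBytes("q"));
      if (cell != null) {
        // Read the value in place via the (array, offset, length) triple, as the examples below do ...
        String value = Bytes.toString(
            cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
        // ... or copy it out into its own byte[] with CellUtil.cloneValue.
        byte[] copy = CellUtil.cloneValue(cell);
        System.out.println(value + " (" + copy.length + " bytes, ts=" + cell.getTimestamp() + ")");
      }
    }
  }
}

The null check matters because getColumnLatestCell returns null when the Result holds no cell for that column; the (array, offset, length) access avoids copying the value, while CellUtil.cloneValue copies it into a fresh byte[].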

Example 1: getRegionServer

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
/**
 * Returns the {@link ServerName} from catalog table {@link Result}
 * where the region is transitioning. It should be the same as
 * {@link HRegionInfo#getServerName(Result)} if the server is at OPEN state.
 * @param r Result to pull the transitioning server name from
 * @return A ServerName instance or {@link HRegionInfo#getServerName(Result)}
 * if necessary fields not found or empty.
 */
static ServerName getRegionServer(final Result r, int replicaId) {
  Cell cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY, getServerNameColumn(replicaId));
  if (cell == null || cell.getValueLength() == 0) {
    RegionLocations locations = MetaTableAccessor.getRegionLocations(r);
    if (locations != null) {
      HRegionLocation location = locations.getRegionLocation(replicaId);
      if (location != null) {
        return location.getServerName();
      }
    }
    return null;
  }
  return ServerName.parseServerName(Bytes.toString(cell.getValueArray(),
    cell.getValueOffset(), cell.getValueLength()));
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 24, Source file: RegionStateStore.java

Example 2: verifyNumericRows

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
public void verifyNumericRows(Table table, final byte[] f, int startRow, int endRow,
    int replicaId)
    throws IOException {
  for (int i = startRow; i < endRow; i++) {
    String failMsg = "Failed verification of row :" + i;
    byte[] data = Bytes.toBytes(String.valueOf(i));
    Get get = new Get(data);
    get.setReplicaId(replicaId);
    get.setConsistency(Consistency.TIMELINE);
    Result result = table.get(get);
    assertTrue(failMsg, result.containsColumn(f, null));
    assertEquals(failMsg, result.getColumnCells(f, null).size(), 1);
    Cell cell = result.getColumnLatestCell(f, null);
    assertTrue(failMsg,
      Bytes.equals(data, 0, data.length, cell.getValueArray(), cell.getValueOffset(),
        cell.getValueLength()));
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 19, Source file: HBaseTestingUtility.java

Example 3: getServerName

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
/**
 * @deprecated use MetaTableAccessor methods for interacting with meta layouts
 */
@Deprecated
public static ServerName getServerName(final Result r) {
  Cell cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
  if (cell == null || cell.getValueLength() == 0) return null;
  String hostAndPort = Bytes.toString(
      cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
  cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY,
    HConstants.STARTCODE_QUALIFIER);
  if (cell == null || cell.getValueLength() == 0) return null;
  try {
    return ServerName.valueOf(hostAndPort,
        Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
  } catch (IllegalArgumentException e) {
    LOG.error("Ignoring invalid region for server " + hostAndPort + "; cell=" + cell, e);
    return null;
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 21, Source file: HRegionInfo.java

Example 4: getRegionState

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
/**
 * Pull the region state from a catalog table {@link Result}.
 * @param r Result to pull the region state from
 * @return the region state, or OPEN if there's no value written.
 */
static State getRegionState(final Result r, int replicaId) {
  Cell cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY, getStateColumn(replicaId));
  if (cell == null || cell.getValueLength() == 0) return State.OPEN;
  return State.valueOf(Bytes.toString(cell.getValueArray(),
    cell.getValueOffset(), cell.getValueLength()));
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 12, Source file: RegionStateStore.java

Example 5: testVisibilityLabelsWithGet

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
@Test
public void testVisibilityLabelsWithGet() throws Exception {
  TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  try (Table table = createTableAndWriteDataWithLabels(tableName, SECRET + "&" + CONFIDENTIAL
      + "&!" + PRIVATE, SECRET + "&" + CONFIDENTIAL + "&" + PRIVATE)) {
    Get get = new Get(row1);
    get.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL));
    Result result = table.get(get);
    assertTrue(!result.isEmpty());
    Cell cell = result.getColumnLatestCell(fam, qual);
    assertTrue(Bytes.equals(value, 0, value.length, cell.getValueArray(), cell.getValueOffset(),
        cell.getValueLength()));
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 15, Source file: TestVisibilityLabels.java

Example 6: extractAuths

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
protected List<String> extractAuths(String user, List<Result> results) {
  List<String> auths = new ArrayList<String>();
  for (Result result : results) {
    Cell labelCell = result.getColumnLatestCell(LABELS_TABLE_FAMILY, LABEL_QUALIFIER);
    Cell userAuthCell = result.getColumnLatestCell(LABELS_TABLE_FAMILY, user.getBytes());
    if (userAuthCell != null) {
      auths.add(Bytes.toString(labelCell.getValueArray(), labelCell.getValueOffset(),
          labelCell.getValueLength()));
    }
  }
  return auths;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 13, Source file: TestVisibilityLabels.java

Example 7: assertEmptyMetaLocation

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
public static void assertEmptyMetaLocation(Table meta, byte[] row, int replicaId)
    throws IOException {
  Get get = new Get(row);
  Result result = meta.get(get);
  Cell serverCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
    MetaTableAccessor.getServerColumn(replicaId));
  Cell startCodeCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
    MetaTableAccessor.getStartCodeColumn(replicaId));
  assertNotNull(serverCell);
  assertNotNull(startCodeCell);
  assertEquals(0, serverCell.getValueLength());
  assertEquals(0, startCodeCell.getValueLength());
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 14, Source file: TestMetaTableAccessor.java

Example 8: testMastersSystemTimeIsUsedInUpdateLocations

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
/**
 * Tests whether maximum of masters system time versus RSs local system time is used
 */
@Test
public void testMastersSystemTimeIsUsedInUpdateLocations() throws IOException {
  long regionId = System.currentTimeMillis();
  HRegionInfo regionInfo = new HRegionInfo(TableName.valueOf("table_foo"),
    HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false, regionId, 0);

  ServerName sn = ServerName.valueOf("bar", 0, 0);
  Table meta = MetaTableAccessor.getMetaHTable(connection);
  try {
    List<HRegionInfo> regionInfos = Lists.newArrayList(regionInfo);
    MetaTableAccessor.addRegionsToMeta(connection, regionInfos, 1);

    long masterSystemTime = EnvironmentEdgeManager.currentTime() + 123456789;
    MetaTableAccessor.updateRegionLocation(connection, regionInfo, sn, 1, masterSystemTime);

    Get get = new Get(regionInfo.getRegionName());
    Result result = meta.get(get);
    Cell serverCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
        MetaTableAccessor.getServerColumn(0));
    Cell startCodeCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
      MetaTableAccessor.getStartCodeColumn(0));
    Cell seqNumCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
      MetaTableAccessor.getSeqNumColumn(0));
    assertNotNull(serverCell);
    assertNotNull(startCodeCell);
    assertNotNull(seqNumCell);
    assertTrue(serverCell.getValueLength() > 0);
    assertTrue(startCodeCell.getValueLength() > 0);
    assertTrue(seqNumCell.getValueLength() > 0);
    assertEquals(masterSystemTime, serverCell.getTimestamp());
    assertEquals(masterSystemTime, startCodeCell.getTimestamp());
    assertEquals(masterSystemTime, seqNumCell.getTimestamp());
  } finally {
    meta.close();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 40, Source file: TestMetaTableAccessor.java

Example 9: testAppendTimestampsAreMonotonic

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
@Test
public void testAppendTimestampsAreMonotonic() throws IOException {
  HRegion region = initHRegion(tableName, name.getMethodName(), CONF, fam1);
  ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
  EnvironmentEdgeManager.injectEdge(edge);

  edge.setValue(10);
  Append a = new Append(row);
  a.setDurability(Durability.SKIP_WAL);
  a.add(fam1, qual1, qual1);
  region.append(a);

  Result result = region.get(new Get(row));
  Cell c = result.getColumnLatestCell(fam1, qual1);
  assertNotNull(c);
  assertEquals(c.getTimestamp(), 10L);

  edge.setValue(1); // clock goes back
  region.append(a);
  result = region.get(new Get(row));
  c = result.getColumnLatestCell(fam1, qual1);
  assertEquals(c.getTimestamp(), 10L);

  byte[] expected = new byte[qual1.length*2];
  System.arraycopy(qual1, 0, expected, 0, qual1.length);
  System.arraycopy(qual1, 0, expected, qual1.length, qual1.length);

  assertTrue(Bytes.equals(c.getValueArray(), c.getValueOffset(), c.getValueLength(),
    expected, 0, expected.length));
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 31, Source file: TestHRegion.java

Example 10: verifyTestDataBatch

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
static void verifyTestDataBatch(Configuration conf, TableName tableName,
    int batchId) throws Exception {
  LOG.debug("Verifying test data batch " + batchId);
  Table table = new HTable(conf, tableName);
  for (int i = 0; i < NUM_ROWS_PER_BATCH; ++i) {
    Get get = new Get(getRowKey(batchId, i));
    Result result = table.get(get);
    for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
      Cell kv = result.getColumnLatestCell(CF_BYTES, getQualifier(j));
      assertTrue(CellUtil.matchingValue(kv, getValue(batchId, i, j)));
    }
  }
  table.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 15, Source file: TestChangingEncoding.java

Example 11: getServerName

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
/**
 * Returns a {@link ServerName} from catalog table {@link Result}.
 * @param r Result to pull from
 * @return A ServerName instance or null if necessary fields not found or empty.
 */
private static ServerName getServerName(final Result r, final int replicaId) {
  byte[] serverColumn = getServerColumn(replicaId);
  Cell cell = r.getColumnLatestCell(getFamily(), serverColumn);
  if (cell == null || cell.getValueLength() == 0) return null;
  String hostAndPort = Bytes.toString(
    cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
  byte[] startcodeColumn = getStartCodeColumn(replicaId);
  cell = r.getColumnLatestCell(getFamily(), startcodeColumn);
  if (cell == null || cell.getValueLength() == 0) return null;
  return ServerName.valueOf(hostAndPort,
    Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 18, Source file: MetaTableAccessor.java

Example 12: main

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
public static void main(String[] args) {
	try {
		Configuration conf = new Configuration();
		conf.set("fs.defaultFS", "hdfs://hadoop1:8020");
		conf.set("yarn.resourcemanager.hostname", "hadoop1");
		conf.set("hbase.zookeeper.quorum", "hadoop1,hadoop2,hadoop3");
		conf = HBaseConfiguration.create(conf);
		HTable table = new HTable(conf, "event_logs".getBytes());

		String date = "2016-03-23";
		long startDate = TimeUtil.parseString2Long(date);
		long endDate = startDate + GlobalConstants.DAY_OF_MILLISECONDS;
		System.out.println();
		Scan scan = new Scan();
		// define the start and stop row keys for the HBase scan
		scan.setStartRow(Bytes.toBytes("" + startDate));
		scan.setStopRow(Bytes.toBytes("" + endDate));

		FilterList filterList = new FilterList();
		// filter the data: analyze only launch events
		filterList.addFilter(new SingleColumnValueFilter(Bytes.toBytes(EventLogConstants.EVENT_LOGS_FAMILY_NAME),
				Bytes.toBytes(EventLogConstants.LOG_COLUMN_NAME_EVENT_NAME), CompareOp.EQUAL,
				Bytes.toBytes(EventEnum.LAUNCH.alias)));
		// define the column names the mapper needs to read
		String[] columns = new String[] { EventLogConstants.LOG_COLUMN_NAME_EVENT_NAME,
				EventLogConstants.LOG_COLUMN_NAME_UUID, EventLogConstants.LOG_COLUMN_NAME_SERVER_TIME,
				EventLogConstants.LOG_COLUMN_NAME_PLATFORM, EventLogConstants.LOG_COLUMN_NAME_BROWSER_NAME,
				EventLogConstants.LOG_COLUMN_NAME_BROWSER_VERSION };
		// scan.addColumn(family, qualifier)
		filterList.addFilter(getColumnFilter(columns));

		scan.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes(EventLogConstants.HBASE_NAME_EVENT_LOGS));
		scan.setFilter(filterList);

		ResultScanner ress = table.getScanner(scan);
		for (Result res : ress) {
			Cell cell = res.getColumnLatestCell("info".getBytes(),
					EventLogConstants.LOG_COLUMN_NAME_UUID.getBytes());
			System.out.println(new String(CellUtil.cloneValue(cell)));
		}
		ress.close();
	} catch (Exception e) {
		// TODO Auto-generated catch block
		e.printStackTrace();
	}
}
 
Developer ID: liuhaozzu, Project: big_data, Lines: 47, Source file: TestHbase.java

Example 13: testMastersSystemTimeIsUsedInMergeRegions

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
@Test
public void testMastersSystemTimeIsUsedInMergeRegions() throws IOException {
  long regionId = System.currentTimeMillis();
  HRegionInfo regionInfoA = new HRegionInfo(TableName.valueOf("table_foo"),
    HConstants.EMPTY_START_ROW, new byte[] {'a'}, false, regionId, 0);
  HRegionInfo regionInfoB = new HRegionInfo(TableName.valueOf("table_foo"),
    new byte[] {'a'}, HConstants.EMPTY_END_ROW, false, regionId, 0);
  HRegionInfo mergedRegionInfo = new HRegionInfo(TableName.valueOf("table_foo"),
    HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false, regionId, 0);

  ServerName sn = ServerName.valueOf("bar", 0, 0);
  Table meta = MetaTableAccessor.getMetaHTable(connection);
  try {
    List<HRegionInfo> regionInfos = Lists.newArrayList(regionInfoA, regionInfoB);
    MetaTableAccessor.addRegionsToMeta(connection, regionInfos, 1);

    // write the serverName column with a big current time, but set the masters time as even
    // bigger. When region merge deletes the rows for regionA and regionB, the serverName columns
    // should not be seen by the following get
    long serverNameTime = EnvironmentEdgeManager.currentTime()   + 100000000;
    long masterSystemTime = EnvironmentEdgeManager.currentTime() + 123456789;

    // write the serverName columns
    MetaTableAccessor.updateRegionLocation(connection, regionInfoA, sn, 1, serverNameTime);

    // assert that we have the serverName column with expected ts
    Get get = new Get(mergedRegionInfo.getRegionName());
    Result result = meta.get(get);
    Cell serverCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
        MetaTableAccessor.getServerColumn(0));
    assertNotNull(serverCell);
    assertEquals(serverNameTime, serverCell.getTimestamp());

    // now merge the regions, effectively deleting the rows for region a and b.
    MetaTableAccessor.mergeRegions(connection, mergedRegionInfo,
      regionInfoA, regionInfoB, sn, 1, masterSystemTime);

    result = meta.get(get);
    serverCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
        MetaTableAccessor.getServerColumn(0));
    Cell startCodeCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
      MetaTableAccessor.getStartCodeColumn(0));
    Cell seqNumCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
      MetaTableAccessor.getSeqNumColumn(0));
    assertNull(serverCell);
    assertNull(startCodeCell);
    assertNull(seqNumCell);
  } finally {
    meta.close();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 52, Source file: TestMetaTableAccessor.java

Example 14: testIncrementTimestampsAreMonotonic

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
@Test
public void testIncrementTimestampsAreMonotonic() throws IOException {
  HRegion region = initHRegion(tableName, name.getMethodName(), CONF, fam1);
  ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
  EnvironmentEdgeManager.injectEdge(edge);

  edge.setValue(10);
  Increment inc = new Increment(row);
  inc.setDurability(Durability.SKIP_WAL);
  inc.addColumn(fam1, qual1, 1L);
  region.increment(inc);

  Result result = region.get(new Get(row));
  Cell c = result.getColumnLatestCell(fam1, qual1);
  assertNotNull(c);
  assertEquals(c.getTimestamp(), 10L);

  edge.setValue(1); // clock goes back
  region.increment(inc);
  result = region.get(new Get(row));
  c = result.getColumnLatestCell(fam1, qual1);
  assertEquals(c.getTimestamp(), 10L);
  assertEquals(Bytes.toLong(c.getValueArray(), c.getValueOffset(), c.getValueLength()), 2L);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 25, Source file: TestHRegion.java

Example 15: getSeqNumDuringOpen

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
/**
 * The latest seqnum that the server writing to meta observed when opening the region.
 * E.g. the seqNum when the result of {@link #getServerName(Result)} was written.
 * @param r Result to pull the seqNum from
 * @return SeqNum, or HConstants.NO_SEQNUM if there's no value written.
 * @deprecated use MetaTableAccessor methods for interacting with meta layouts
 */
@Deprecated
public static long getSeqNumDuringOpen(final Result r) {
  Cell cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY, HConstants.SEQNUM_QUALIFIER);
  if (cell == null || cell.getValueLength() == 0) return HConstants.NO_SEQNUM;
  return Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 14, Source file: HRegionInfo.java


Note: The org.apache.hadoop.hbase.client.Result.getColumnLatestCell examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. Refer to each project's license before using or redistributing the code; do not reproduce this article without permission.