

Java Result.getColumnLatestCell Method Code Examples

This article collects typical code examples of the Java method org.apache.hadoop.hbase.client.Result.getColumnLatestCell. If you are unsure what Result.getColumnLatestCell does, how to call it, or what it looks like in real code, the curated examples below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hbase.client.Result.


The following presents 15 code examples of Result.getColumnLatestCell, sorted by popularity.
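Before the collected examples, here is a minimal usage sketch of the method itself. getColumnLatestCell(family, qualifier) returns the most recent Cell for the given column, or null if the row has no such column, so the value must be read from the cell's backing array via getValueArray(), getValueOffset() and getValueLength(). The table name "my_table" and the column "cf:q" below are hypothetical placeholders and are not taken from any of the projects cited in this article.

import java.io.IOException;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class GetColumnLatestCellExample {
  public static void main(String[] args) throws IOException {
    // "my_table", "cf" and "q" are placeholder names used only for this sketch.
    try (Connection connection = ConnectionFactory.createConnection();
         Table table = connection.getTable(TableName.valueOf("my_table"))) {
      Get get = new Get(Bytes.toBytes("row1"));
      Result result = table.get(get);
      // Latest Cell for column cf:q, or null if the row has no such column.
      Cell cell = result.getColumnLatestCell(Bytes.toBytes("cf"), Bytes.toBytes("q"));
      if (cell != null && cell.getValueLength() > 0) {
        // Read the value in place from the cell's backing array instead of copying it.
        String value = Bytes.toString(cell.getValueArray(), cell.getValueOffset(),
            cell.getValueLength());
        System.out.println("latest value = " + value);
      }
    }
  }
}

Note how the examples below guard against a null cell or a zero-length value before reading it: getColumnLatestCell does not throw when the column is missing.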

Example 1: getRegionServer

import org.apache.hadoop.hbase.client.Result; // import the package/class the method depends on
/**
 * Returns the {@link ServerName} from catalog table {@link Result}
 * where the region is transitioning. It should be the same as
 * {@link HRegionInfo#getServerName(Result)} if the server is at OPEN state.
 * @param r Result to pull the transitioning server name from
 * @return A ServerName instance or {@link HRegionInfo#getServerName(Result)}
 * if necessary fields not found or empty.
 */
static ServerName getRegionServer(final Result r, int replicaId) {
  Cell cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY, getServerNameColumn(replicaId));
  if (cell == null || cell.getValueLength() == 0) {
    RegionLocations locations = MetaTableAccessor.getRegionLocations(r);
    if (locations != null) {
      HRegionLocation location = locations.getRegionLocation(replicaId);
      if (location != null) {
        return location.getServerName();
      }
    }
    return null;
  }
  return ServerName.parseServerName(Bytes.toString(cell.getValueArray(),
    cell.getValueOffset(), cell.getValueLength()));
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 24, Source: RegionStateStore.java

Example 2: verifyNumericRows

import org.apache.hadoop.hbase.client.Result; // import the package/class the method depends on
public void verifyNumericRows(Table table, final byte[] f, int startRow, int endRow,
    int replicaId)
    throws IOException {
  for (int i = startRow; i < endRow; i++) {
    String failMsg = "Failed verification of row :" + i;
    byte[] data = Bytes.toBytes(String.valueOf(i));
    Get get = new Get(data);
    get.setReplicaId(replicaId);
    get.setConsistency(Consistency.TIMELINE);
    Result result = table.get(get);
    assertTrue(failMsg, result.containsColumn(f, null));
    assertEquals(failMsg, result.getColumnCells(f, null).size(), 1);
    Cell cell = result.getColumnLatestCell(f, null);
    assertTrue(failMsg,
      Bytes.equals(data, 0, data.length, cell.getValueArray(), cell.getValueOffset(),
        cell.getValueLength()));
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 19, Source: HBaseTestingUtility.java

Example 3: getServerName

import org.apache.hadoop.hbase.client.Result; // import the package/class the method depends on
/**
 * @deprecated use MetaTableAccessor methods for interacting with meta layouts
 */
@Deprecated
public static ServerName getServerName(final Result r) {
  Cell cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
  if (cell == null || cell.getValueLength() == 0) return null;
  String hostAndPort = Bytes.toString(
      cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
  cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY,
    HConstants.STARTCODE_QUALIFIER);
  if (cell == null || cell.getValueLength() == 0) return null;
  try {
    return ServerName.valueOf(hostAndPort,
        Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
  } catch (IllegalArgumentException e) {
    LOG.error("Ignoring invalid region for server " + hostAndPort + "; cell=" + cell, e);
    return null;
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 21, Source: HRegionInfo.java

Example 4: getRegionState

import org.apache.hadoop.hbase.client.Result; // import the package/class the method depends on
/**
 * Pull the region state from a catalog table {@link Result}.
 * @param r Result to pull the region state from
 * @return the region state, or OPEN if there's no value written.
 */
static State getRegionState(final Result r, int replicaId) {
  Cell cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY, getStateColumn(replicaId));
  if (cell == null || cell.getValueLength() == 0) return State.OPEN;
  return State.valueOf(Bytes.toString(cell.getValueArray(),
    cell.getValueOffset(), cell.getValueLength()));
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 12, Source: RegionStateStore.java

Example 5: testVisibilityLabelsWithGet

import org.apache.hadoop.hbase.client.Result; // import the package/class the method depends on
@Test
public void testVisibilityLabelsWithGet() throws Exception {
  TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  try (Table table = createTableAndWriteDataWithLabels(tableName, SECRET + "&" + CONFIDENTIAL
      + "&!" + PRIVATE, SECRET + "&" + CONFIDENTIAL + "&" + PRIVATE)) {
    Get get = new Get(row1);
    get.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL));
    Result result = table.get(get);
    assertTrue(!result.isEmpty());
    Cell cell = result.getColumnLatestCell(fam, qual);
    assertTrue(Bytes.equals(value, 0, value.length, cell.getValueArray(), cell.getValueOffset(),
        cell.getValueLength()));
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 15, Source: TestVisibilityLabels.java

Example 6: extractAuths

import org.apache.hadoop.hbase.client.Result; // import the package/class the method depends on
protected List<String> extractAuths(String user, List<Result> results) {
  List<String> auths = new ArrayList<String>();
  for (Result result : results) {
    Cell labelCell = result.getColumnLatestCell(LABELS_TABLE_FAMILY, LABEL_QUALIFIER);
    Cell userAuthCell = result.getColumnLatestCell(LABELS_TABLE_FAMILY, user.getBytes());
    if (userAuthCell != null) {
      auths.add(Bytes.toString(labelCell.getValueArray(), labelCell.getValueOffset(),
          labelCell.getValueLength()));
    }
  }
  return auths;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 13, Source: TestVisibilityLabels.java

Example 7: assertEmptyMetaLocation

import org.apache.hadoop.hbase.client.Result; // import the package/class the method depends on
public static void assertEmptyMetaLocation(Table meta, byte[] row, int replicaId)
    throws IOException {
  Get get = new Get(row);
  Result result = meta.get(get);
  Cell serverCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
    MetaTableAccessor.getServerColumn(replicaId));
  Cell startCodeCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
    MetaTableAccessor.getStartCodeColumn(replicaId));
  assertNotNull(serverCell);
  assertNotNull(startCodeCell);
  assertEquals(0, serverCell.getValueLength());
  assertEquals(0, startCodeCell.getValueLength());
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 14, Source: TestMetaTableAccessor.java

Example 8: testMastersSystemTimeIsUsedInUpdateLocations

import org.apache.hadoop.hbase.client.Result; // import the package/class the method depends on
/**
 * Tests whether maximum of masters system time versus RSs local system time is used
 */
@Test
public void testMastersSystemTimeIsUsedInUpdateLocations() throws IOException {
  long regionId = System.currentTimeMillis();
  HRegionInfo regionInfo = new HRegionInfo(TableName.valueOf("table_foo"),
    HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false, regionId, 0);

  ServerName sn = ServerName.valueOf("bar", 0, 0);
  Table meta = MetaTableAccessor.getMetaHTable(connection);
  try {
    List<HRegionInfo> regionInfos = Lists.newArrayList(regionInfo);
    MetaTableAccessor.addRegionsToMeta(connection, regionInfos, 1);

    long masterSystemTime = EnvironmentEdgeManager.currentTime() + 123456789;
    MetaTableAccessor.updateRegionLocation(connection, regionInfo, sn, 1, masterSystemTime);

    Get get = new Get(regionInfo.getRegionName());
    Result result = meta.get(get);
    Cell serverCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
        MetaTableAccessor.getServerColumn(0));
    Cell startCodeCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
      MetaTableAccessor.getStartCodeColumn(0));
    Cell seqNumCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
      MetaTableAccessor.getSeqNumColumn(0));
    assertNotNull(serverCell);
    assertNotNull(startCodeCell);
    assertNotNull(seqNumCell);
    assertTrue(serverCell.getValueLength() > 0);
    assertTrue(startCodeCell.getValueLength() > 0);
    assertTrue(seqNumCell.getValueLength() > 0);
    assertEquals(masterSystemTime, serverCell.getTimestamp());
    assertEquals(masterSystemTime, startCodeCell.getTimestamp());
    assertEquals(masterSystemTime, seqNumCell.getTimestamp());
  } finally {
    meta.close();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 40, Source: TestMetaTableAccessor.java

Example 9: testAppendTimestampsAreMonotonic

import org.apache.hadoop.hbase.client.Result; // import the package/class the method depends on
@Test
public void testAppendTimestampsAreMonotonic() throws IOException {
  HRegion region = initHRegion(tableName, name.getMethodName(), CONF, fam1);
  ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
  EnvironmentEdgeManager.injectEdge(edge);

  edge.setValue(10);
  Append a = new Append(row);
  a.setDurability(Durability.SKIP_WAL);
  a.add(fam1, qual1, qual1);
  region.append(a);

  Result result = region.get(new Get(row));
  Cell c = result.getColumnLatestCell(fam1, qual1);
  assertNotNull(c);
  assertEquals(c.getTimestamp(), 10L);

  edge.setValue(1); // clock goes back
  region.append(a);
  result = region.get(new Get(row));
  c = result.getColumnLatestCell(fam1, qual1);
  assertEquals(c.getTimestamp(), 10L);

  byte[] expected = new byte[qual1.length*2];
  System.arraycopy(qual1, 0, expected, 0, qual1.length);
  System.arraycopy(qual1, 0, expected, qual1.length, qual1.length);

  assertTrue(Bytes.equals(c.getValueArray(), c.getValueOffset(), c.getValueLength(),
    expected, 0, expected.length));
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 31, Source: TestHRegion.java

Example 10: verifyTestDataBatch

import org.apache.hadoop.hbase.client.Result; // import the package/class the method depends on
static void verifyTestDataBatch(Configuration conf, TableName tableName,
    int batchId) throws Exception {
  LOG.debug("Verifying test data batch " + batchId);
  Table table = new HTable(conf, tableName);
  for (int i = 0; i < NUM_ROWS_PER_BATCH; ++i) {
    Get get = new Get(getRowKey(batchId, i));
    Result result = table.get(get);
    for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
      Cell kv = result.getColumnLatestCell(CF_BYTES, getQualifier(j));
      assertTrue(CellUtil.matchingValue(kv, getValue(batchId, i, j)));
    }
  }
  table.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 15, Source: TestChangingEncoding.java

Example 11: getServerName

import org.apache.hadoop.hbase.client.Result; // import the package/class the method depends on
/**
 * Returns a {@link ServerName} from catalog table {@link Result}.
 * @param r Result to pull from
 * @return A ServerName instance or null if necessary fields not found or empty.
 */
private static ServerName getServerName(final Result r, final int replicaId) {
  byte[] serverColumn = getServerColumn(replicaId);
  Cell cell = r.getColumnLatestCell(getFamily(), serverColumn);
  if (cell == null || cell.getValueLength() == 0) return null;
  String hostAndPort = Bytes.toString(
    cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
  byte[] startcodeColumn = getStartCodeColumn(replicaId);
  cell = r.getColumnLatestCell(getFamily(), startcodeColumn);
  if (cell == null || cell.getValueLength() == 0) return null;
  return ServerName.valueOf(hostAndPort,
    Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 18, Source: MetaTableAccessor.java

Example 12: main

import org.apache.hadoop.hbase.client.Result; // import the package/class the method depends on
public static void main(String[] args) {
	try {
		Configuration conf = new Configuration();
		conf.set("fs.defaultFS", "hdfs://hadoop1:8020");
		conf.set("yarn.resourcemanager.hostname", "hadoop1");
		conf.set("hbase.zookeeper.quorum", "hadoop1,hadoop2,hadoop3");
		conf = HBaseConfiguration.create(conf);
		HTable table = new HTable(conf, "event_logs".getBytes());

		String date = "2016-03-23";
		long startDate = TimeUtil.parseString2Long(date);
		long endDate = startDate + GlobalConstants.DAY_OF_MILLISECONDS;
		System.out.println();
		Scan scan = new Scan();
		// define the start and stop rowkeys for the HBase scan
		scan.setStartRow(Bytes.toBytes("" + startDate));
		scan.setStopRow(Bytes.toBytes("" + endDate));

		FilterList filterList = new FilterList();
		// filter the data: only analyze LAUNCH events
		filterList.addFilter(new SingleColumnValueFilter(Bytes.toBytes(EventLogConstants.EVENT_LOGS_FAMILY_NAME),
				Bytes.toBytes(EventLogConstants.LOG_COLUMN_NAME_EVENT_NAME), CompareOp.EQUAL,
				Bytes.toBytes(EventEnum.LAUNCH.alias)));
		// column names the mapper needs to fetch
		String[] columns = new String[] { EventLogConstants.LOG_COLUMN_NAME_EVENT_NAME,
				EventLogConstants.LOG_COLUMN_NAME_UUID, EventLogConstants.LOG_COLUMN_NAME_SERVER_TIME,
				EventLogConstants.LOG_COLUMN_NAME_PLATFORM, EventLogConstants.LOG_COLUMN_NAME_BROWSER_NAME,
				EventLogConstants.LOG_COLUMN_NAME_BROWSER_VERSION };
		// scan.addColumn(family, qualifier)
		filterList.addFilter(getColumnFilter(columns));

		scan.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes(EventLogConstants.HBASE_NAME_EVENT_LOGS));
		scan.setFilter(filterList);

		ResultScanner ress = table.getScanner(scan);
		for (Result res : ress) {
			Cell cell = res.getColumnLatestCell("info".getBytes(),
					EventLogConstants.LOG_COLUMN_NAME_UUID.getBytes());
			System.out.println(new String(CellUtil.cloneValue(cell)));
		}
		ress.close();
	} catch (Exception e) {
		e.printStackTrace();
	}
}
 
Developer ID: liuhaozzu, Project: big_data, Lines of code: 47, Source: TestHbase.java

Example 13: testMastersSystemTimeIsUsedInMergeRegions

import org.apache.hadoop.hbase.client.Result; // import the package/class the method depends on
@Test
public void testMastersSystemTimeIsUsedInMergeRegions() throws IOException {
  long regionId = System.currentTimeMillis();
  HRegionInfo regionInfoA = new HRegionInfo(TableName.valueOf("table_foo"),
    HConstants.EMPTY_START_ROW, new byte[] {'a'}, false, regionId, 0);
  HRegionInfo regionInfoB = new HRegionInfo(TableName.valueOf("table_foo"),
    new byte[] {'a'}, HConstants.EMPTY_END_ROW, false, regionId, 0);
  HRegionInfo mergedRegionInfo = new HRegionInfo(TableName.valueOf("table_foo"),
    HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false, regionId, 0);

  ServerName sn = ServerName.valueOf("bar", 0, 0);
  Table meta = MetaTableAccessor.getMetaHTable(connection);
  try {
    List<HRegionInfo> regionInfos = Lists.newArrayList(regionInfoA, regionInfoB);
    MetaTableAccessor.addRegionsToMeta(connection, regionInfos, 1);

    // write the serverName column with a big current time, but set the masters time as even
    // bigger. When region merge deletes the rows for regionA and regionB, the serverName columns
    // should not be seen by the following get
    long serverNameTime = EnvironmentEdgeManager.currentTime()   + 100000000;
    long masterSystemTime = EnvironmentEdgeManager.currentTime() + 123456789;

    // write the serverName columns
    MetaTableAccessor.updateRegionLocation(connection, regionInfoA, sn, 1, serverNameTime);

    // assert that we have the serverName column with expected ts
    Get get = new Get(mergedRegionInfo.getRegionName());
    Result result = meta.get(get);
    Cell serverCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
        MetaTableAccessor.getServerColumn(0));
    assertNotNull(serverCell);
    assertEquals(serverNameTime, serverCell.getTimestamp());

    // now merge the regions, effectively deleting the rows for region a and b.
    MetaTableAccessor.mergeRegions(connection, mergedRegionInfo,
      regionInfoA, regionInfoB, sn, 1, masterSystemTime);

    result = meta.get(get);
    serverCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
        MetaTableAccessor.getServerColumn(0));
    Cell startCodeCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
      MetaTableAccessor.getStartCodeColumn(0));
    Cell seqNumCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
      MetaTableAccessor.getSeqNumColumn(0));
    assertNull(serverCell);
    assertNull(startCodeCell);
    assertNull(seqNumCell);
  } finally {
    meta.close();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 52, Source: TestMetaTableAccessor.java

Example 14: testIncrementTimestampsAreMonotonic

import org.apache.hadoop.hbase.client.Result; // import the package/class the method depends on
@Test
public void testIncrementTimestampsAreMonotonic() throws IOException {
  HRegion region = initHRegion(tableName, name.getMethodName(), CONF, fam1);
  ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
  EnvironmentEdgeManager.injectEdge(edge);

  edge.setValue(10);
  Increment inc = new Increment(row);
  inc.setDurability(Durability.SKIP_WAL);
  inc.addColumn(fam1, qual1, 1L);
  region.increment(inc);

  Result result = region.get(new Get(row));
  Cell c = result.getColumnLatestCell(fam1, qual1);
  assertNotNull(c);
  assertEquals(c.getTimestamp(), 10L);

  edge.setValue(1); // clock goes back
  region.increment(inc);
  result = region.get(new Get(row));
  c = result.getColumnLatestCell(fam1, qual1);
  assertEquals(c.getTimestamp(), 10L);
  assertEquals(Bytes.toLong(c.getValueArray(), c.getValueOffset(), c.getValueLength()), 2L);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 25, Source: TestHRegion.java

Example 15: getSeqNumDuringOpen

import org.apache.hadoop.hbase.client.Result; // import the package/class the method depends on
/**
 * The latest seqnum that the server writing to meta observed when opening the region.
 * E.g. the seqNum when the result of {@link #getServerName(Result)} was written.
 * @param r Result to pull the seqNum from
 * @return SeqNum, or HConstants.NO_SEQNUM if there's no value written.
 * @deprecated use MetaTableAccessor methods for interacting with meta layouts
 */
@Deprecated
public static long getSeqNumDuringOpen(final Result r) {
  Cell cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY, HConstants.SEQNUM_QUALIFIER);
  if (cell == null || cell.getValueLength() == 0) return HConstants.NO_SEQNUM;
  return Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 14, Source: HRegionInfo.java


Note: The org.apache.hadoop.hbase.client.Result.getColumnLatestCell examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Refer to the corresponding project's license before distributing or using the code, and do not reproduce this article without permission.