

Java Result.rawCells Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.Result.rawCells. If you are wondering what Result.rawCells does, how to call it, or what real-world uses look like, the curated examples below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hbase.client.Result.


The sections below present 15 code examples of the Result.rawCells method, sorted by popularity.
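Before the examples, here is a minimal, self-contained sketch of the typical call pattern: fetch a Result with a Get, then walk its backing Cell array via rawCells(). The connection setup, the table name "demo_table", and the row key "row1" are illustrative assumptions, not drawn from the examples below. Note that rawCells() can return a null or empty array when the row does not exist, which the isEmpty() guard covers.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RawCellsDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("demo_table"))) {
      Result result = table.get(new Get(Bytes.toBytes("row1")));
      if (result.isEmpty()) {
        System.out.println("Row not found");
        return;
      }
      // rawCells() exposes the Result's backing Cell[] without copying,
      // so it is the cheapest way to iterate every cell of the row.
      for (Cell cell : result.rawCells()) {
        System.out.println(Bytes.toString(CellUtil.cloneFamily(cell)) + ":"
            + Bytes.toString(CellUtil.cloneQualifier(cell)) + " = "
            + Bytes.toString(CellUtil.cloneValue(cell)));
      }
    }
  }
}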

Example 1: resultFromHBase

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
/**
 * Creates a {@link TResult} (Thrift) from a {@link Result} (HBase).
 *
 * @param in the <code>Result</code> to convert
 *
 * @return converted result, returns an empty result if the input is <code>null</code>
 */
public static TResult resultFromHBase(Result in) {
  // Honor the documented contract: a null input yields an empty result.
  if (in == null) {
    return new TResult();
  }
  Cell[] raw = in.rawCells();
  TResult out = new TResult();
  byte[] row = in.getRow();
  if (row != null) {
    out.setRow(row);
  }
  List<TColumnValue> columnValues = new ArrayList<TColumnValue>();
  for (Cell kv : raw) {
    TColumnValue col = new TColumnValue();
    col.setFamily(CellUtil.cloneFamily(kv));
    col.setQualifier(CellUtil.cloneQualifier(kv));
    col.setTimestamp(kv.getTimestamp());
    col.setValue(CellUtil.cloneValue(kv));
    if (kv.getTagsLength() > 0) {
      col.setTags(CellUtil.getTagArray(kv));
    }
    columnValues.add(col);
  }
  out.setColumnValues(columnValues);
  return out;
}
 
Author: fengchen8086, Project: ditb, Lines: 30, Source: ThriftUtilities.java
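A brief usage sketch of the converter above (an assumption for illustration, not taken from the ditb project): fetch a row and hand the Result to resultFromHBase. The hypothetical helper assumes the thrift2 packages that ship the converter and the generated TResult type.

import java.io.IOException;

import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.thrift2.ThriftUtilities;
import org.apache.hadoop.hbase.thrift2.generated.TResult;

public class ThriftFetchSketch {
  // Hypothetical helper: fetch one row and convert it to the Thrift wire type.
  static TResult fetchAsThrift(Table table, byte[] row) throws IOException {
    Result hbaseResult = table.get(new Get(row));
    return ThriftUtilities.resultFromHBase(hbaseResult);
  }
}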

Example 2: parsePermissions

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
private static ListMultimap<String, TablePermission> parsePermissions(
    byte[] entryName, Result result) {
  ListMultimap<String, TablePermission> perms = ArrayListMultimap.create();
  if (result != null && result.size() > 0) {
    for (Cell kv : result.rawCells()) {

      Pair<String,TablePermission> permissionsOfUserOnTable =
          parsePermissionRecord(entryName, kv);

      if (permissionsOfUserOnTable != null) {
        String username = permissionsOfUserOnTable.getFirst();
        TablePermission permissions = permissionsOfUserOnTable.getSecond();
        perms.put(username, permissions);
      }
    }
  }
  return perms;
}
 
Author: fengchen8086, Project: ditb, Lines: 19, Source: AccessControlLists.java

Example 3: hashResult

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
public void hashResult(Result result) {
  if (!batchStarted) {
    throw new RuntimeException("Cannot add to batch that has not been started.");
  }
  for (Cell cell : result.rawCells()) {
    int rowLength = cell.getRowLength();
    int familyLength = cell.getFamilyLength();
    int qualifierLength = cell.getQualifierLength();
    int valueLength = cell.getValueLength();
    digest.update(cell.getRowArray(), cell.getRowOffset(), rowLength);
    digest.update(cell.getFamilyArray(), cell.getFamilyOffset(), familyLength);
    digest.update(cell.getQualifierArray(), cell.getQualifierOffset(), qualifierLength);
    long ts = cell.getTimestamp();
    for (int i = 8; i > 0; i--) {
      digest.update((byte) ts);
      ts >>>= 8;
    }
    digest.update(cell.getValueArray(), cell.getValueOffset(), valueLength);
    
    batchSize += rowLength + familyLength + qualifierLength + 8 + valueLength;
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 23, Source: HashTable.java

Example 4: map

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
/**
 * @param row  The current table row key.
 * @param value  The columns.
 * @param context  The current context.
 * @throws IOException When something is broken with the data.
 */
@Override
public void map(ImmutableBytesWritable row, Result value,
  Context context)
throws IOException {
  try {
    if (LOG.isTraceEnabled()) {
      LOG.trace("Considering the row."
          + Bytes.toString(row.get(), row.getOffset(), row.getLength()));
    }
    if (filter == null || !filter.filterRowKey(row.get(), row.getOffset(), row.getLength())) {
      for (Cell kv : value.rawCells()) {
        kv = filterKv(filter, kv);
        // skip if we filtered it out
        if (kv == null) continue;
        // TODO get rid of ensureKeyValue
        context.write(row, KeyValueUtil.ensureKeyValueTypeForMR(convertKv(kv, cfRenameMap)));
      }
    }
  } catch (InterruptedException e) {
    e.printStackTrace();
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 29, Source: Import.java

Example 5: addSize

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
/**
 * Method to account for the size of retained cells and retained data blocks.
 * @return an object that represents the last referenced block from this response.
 */
Object addSize(RpcCallContext context, Result r, Object lastBlock) {
  if (context != null && !r.isEmpty()) {
    for (Cell c : r.rawCells()) {
      context.incrementResponseCellSize(CellUtil.estimatedHeapSizeOf(c));
      // We're using the last block being the same as the current block as
      // a proxy for pointing to a new block. This won't be exact.
      // If there are multiple gets that bounce back and forth
      // Then it's possible that this will over count the size of
      // referenced blocks. However it's better to over count and
      // use two RPC's than to OOME the RegionServer.
      byte[] valueArray = c.getValueArray();
      if (valueArray != lastBlock) {
        context.incrementResponseBlockSize(valueArray.length);
        lastBlock = valueArray;
      }
    }
  }
  return lastBlock;
}
 
Author: fengchen8086, Project: ditb, Lines: 24, Source: RSRpcServices.java

Example 6: verifyData

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
static void verifyData(HRegion newReg, int startRow, int numRows, byte[] qf, byte[]... families)
    throws IOException {
  for (int i = startRow; i < startRow + numRows; i++) {
    byte[] row = Bytes.toBytes("" + i);
    Get get = new Get(row);
    for (byte[] family : families) {
      get.addColumn(family, qf);
    }
    Result result = newReg.get(get);
    Cell[] raw = result.rawCells();
    assertEquals(families.length, result.size());
    for (int j = 0; j < families.length; j++) {
      assertTrue(CellUtil.matchingRow(raw[j], row));
      assertTrue(CellUtil.matchingFamily(raw[j], families[j]));
      assertTrue(CellUtil.matchingQualifier(raw[j], qf));
    }
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 19, Source: TestHRegion.java

Example 7: assertICV

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
private void assertICV(byte [] row,
                       byte [] family,
                       byte[] qualifier,
                       long amount) throws IOException {
  // run a get and verify the incremented counter value
  Get get = new Get(row);
  get.addColumn(family, qualifier);
  Result result = region.get(get);
  assertEquals(1, result.size());

  Cell kv = result.rawCells()[0];
  long r = Bytes.toLong(CellUtil.cloneValue(kv));
  assertEquals(amount, r);
}
 
Author: fengchen8086, Project: ditb, Lines: 15, Source: TestAtomicOperation.java

Example 8: verifyResult

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
/**
 * Verifies that the result contains all the key values within expKvList; fails the test otherwise.
 * @param result the result to verify
 * @param expKvList the expected cells, in order
 * @param msg a message logged before verification
 */
static void verifyResult(Result result, List<Cell> expKvList, String msg) {
  if (LOG.isInfoEnabled()) {
    LOG.info(msg);
    LOG.info("Expected count: " + expKvList.size());
    LOG.info("Actual count: " + result.size());
  }

  if (expKvList.size() == 0) return;

  int i = 0;
  for (Cell kv : result.rawCells()) {
    if (i >= expKvList.size()) {
      break; // we will check the size later
    }

    Cell kvExp = expKvList.get(i++);
    assertTrue("Not equal. get kv: " + kv.toString() + " exp kv: " + kvExp.toString(),
        kvExp.equals(kv));
  }

  assertEquals(expKvList.size(), result.size());
}
 
Author: fengchen8086, Project: ditb, Lines: 29, Source: TestPartialResultsFromClientSide.java

Example 9: print

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
/**
 * Pretty-prints query results.
 *
 * @param results the results to print
 */
public static void print(Result... results) {
    log.info("Query results:");
    for (Result result : results) {
        Cell[] cells = result.rawCells();
        for (Cell cell : cells) {
            log.info("rowkey:" + new String(CellUtil.cloneRow(cell)) + ","
                    + "columnFamily:" + new String(CellUtil.cloneFamily(cell)) + ","
                    + "qualifier:" + new String(CellUtil.cloneQualifier(cell)) + ","
                    + "Timetamp:" + cell.getTimestamp() + ","
                    + "value:" + new String(CellUtil.cloneValue(cell)) + ",");
        }
    }
}
 
Author: mumuhadoop, Project: mumu-hbase, Lines: 19, Source: HBaseResultUtil.java

Example 10: testReversedPartialResultWhenRegionMove

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
@Test
public void testReversedPartialResultWhenRegionMove() throws IOException {
  Table table=createTestTable(TableName.valueOf("testReversedPartialResultWhenRegionMove"),
      ROWS, FAMILIES, QUALIFIERS, VALUE);

  moveRegion(table, 1);

  Scan scan = new Scan();
  scan.setMaxResultSize(1);
  scan.setAllowPartialResults(true);
  scan.setReversed(true);
  ResultScanner scanner = table.getScanner(scan);
  for (int i = 0; i < NUM_FAMILIES * NUM_QUALIFIERS-1; i++) {
    scanner.next();
  }
  Result result1 = scanner.next();
  assertEquals(1, result1.rawCells().length);
  Cell c1 = result1.rawCells()[0];
  assertCell(c1, ROWS[NUM_ROWS-1], FAMILIES[NUM_FAMILIES - 1], QUALIFIERS[NUM_QUALIFIERS - 1]);
  assertFalse(result1.isPartial());

  moveRegion(table, 2);

  Result result2 = scanner.next();
  assertEquals(1, result2.rawCells().length);
  Cell c2 = result2.rawCells()[0];
  assertCell(c2, ROWS[NUM_ROWS-2], FAMILIES[0], QUALIFIERS[0]);
  assertTrue(result2.isPartial());

  moveRegion(table, 3);

  Result result3 = scanner.next();
  assertEquals(1, result3.rawCells().length);
  Cell c3 = result3.rawCells()[0];
  assertCell(c3, ROWS[NUM_ROWS-2], FAMILIES[0], QUALIFIERS[1]);
  assertTrue(result3.isPartial());

}
 
Author: fengchen8086, Project: ditb, Lines: 39, Source: TestPartialResultsFromClientSide.java

Example 11: calculateResultSize

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
public static long calculateResultSize(final Result result) {
  long size = 0;
  for (Cell cell : result.rawCells()) {
    size += KeyValueUtil.length(cell);
  }
  return size;
}
 
Author: fengchen8086, Project: ditb, Lines: 8, Source: QuotaUtil.java
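A possible use of this helper (a sketch under assumptions of mine, not from the ditb project; QuotaUtil is an HBase-internal class, so treat this as illustrative only): summing per-Result sizes to estimate the total payload of a scan.

import java.io.IOException;

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.quotas.QuotaUtil;

public class ScanPayloadSketch {
  // Hypothetical helper: rough total size of everything a full-table scan returns.
  static long estimateScanPayload(Table table) throws IOException {
    long totalBytes = 0;
    try (ResultScanner scanner = table.getScanner(new Scan())) {
      for (Result result : scanner) {
        totalBytes += QuotaUtil.calculateResultSize(result);
      }
    }
    return totalBytes;
  }
}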

Example 12: testReversedCompleteResultWhenRegionMove

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
@Test
public void testReversedCompleteResultWhenRegionMove() throws IOException {
  Table table=createTestTable(TableName.valueOf("testReversedCompleteResultWhenRegionMove"),
      ROWS, FAMILIES, QUALIFIERS, VALUE);

  moveRegion(table, 1);

  Scan scan = new Scan();
  scan.setMaxResultSize(1);
  scan.setCaching(1);
  scan.setReversed(true);
  ResultScanner scanner = table.getScanner(scan);

  Result result1 = scanner.next();
  assertEquals(NUM_FAMILIES*NUM_QUALIFIERS, result1.rawCells().length);
  Cell c1 = result1.rawCells()[0];
  assertCell(c1, ROWS[NUM_ROWS-1], FAMILIES[0], QUALIFIERS[0]);
  assertFalse(result1.isPartial());

  moveRegion(table, 2);

  Result result2 = scanner.next();
  assertEquals(NUM_FAMILIES*NUM_QUALIFIERS, result2.rawCells().length);
  Cell c2 = result2.rawCells()[0];
  assertCell(c2, ROWS[NUM_ROWS-2], FAMILIES[0], QUALIFIERS[0]);
  assertFalse(result2.isPartial());

  moveRegion(table, 3);

  Result result3 = scanner.next();
  assertEquals(NUM_FAMILIES*NUM_QUALIFIERS, result3.rawCells().length);
  Cell c3 = result3.rawCells()[0];
  assertCell(c3, ROWS[NUM_ROWS-3], FAMILIES[0], QUALIFIERS[0]);
  assertFalse(result3.isPartial());

}
 
Author: fengchen8086, Project: ditb, Lines: 37, Source: TestPartialResultsFromClientSide.java

Example 13: assertGet

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
private static void assertGet(final HRegion region, byte [] row, byte [] family,
    byte[] qualifier, byte[] value) throws IOException {
  // run a get and see if the value matches
  Get get = new Get(row);
  get.addColumn(family, qualifier);
  Result result = region.get(get);
  assertEquals(1, result.size());

  Cell kv = result.rawCells()[0];
  byte[] r = CellUtil.cloneValue(kv);
  assertTrue(Bytes.compareTo(r, value) == 0);
}
 
Author: fengchen8086, Project: ditb, Lines: 13, Source: TestParallelPut.java

Example 14: countCellsFromScanner

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
/**
 * Exhausts the scanner by calling next repeatedly. Once completely exhausted, closes the
 * scanner and returns the total cell count.
 * @param scanner the scanner to drain
 * @return the total number of cells seen by the scanner
 * @throws Exception if scanning fails
 */
private int countCellsFromScanner(ResultScanner scanner) throws Exception {
  Result result = null;
  int numCells = 0;
  while ((result = scanner.next()) != null) {
    numCells += result.rawCells().length;
  }

  scanner.close();
  return numCells;
}
 
Author: fengchen8086, Project: ditb, Lines: 18, Source: TestPartialResultsFromClientSide.java
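One caveat on the helper above: the scanner is closed only when iteration completes normally, so a throwing next() would leak it. A minimal try-with-resources variant (a sketch of mine, not from the original test) closes the scanner on every path:

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;

// Same counting logic, but the scanner is closed even if iteration throws.
private int countCellsFromScannerSafely(ResultScanner scanner) throws Exception {
  int numCells = 0;
  try (ResultScanner s = scanner) {
    for (Result result : s) {
      numCells += result.rawCells().length;
    }
  }
  return numCells;
}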

Example 15: testResettingCounters

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
@Test
public void testResettingCounters() throws Exception {

  HBaseTestingUtility htu = new HBaseTestingUtility();
  Configuration conf = htu.getConfiguration();
  FileSystem fs = FileSystem.get(conf);
  byte [] table = Bytes.toBytes("table");
  byte [][] families = new byte [][] {
      Bytes.toBytes("family1"),
      Bytes.toBytes("family2"),
      Bytes.toBytes("family3")
  };
  int numQualifiers = 10;
  byte [][] qualifiers = new byte [numQualifiers][];
  for (int i=0; i<numQualifiers; i++) qualifiers[i] = Bytes.toBytes("qf" + i);
  int numRows = 10;
  byte [][] rows = new byte [numRows][];
  for (int i=0; i<numRows; i++) rows[i] = Bytes.toBytes("r" + i);

  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
  for (byte [] family : families) htd.addFamily(new HColumnDescriptor(family));

  HRegionInfo hri = new HRegionInfo(htd.getTableName(), null, null, false);
  String testDir = htu.getDataTestDir() + "/TestResettingCounters/";
  Path path = new Path(testDir);
  if (fs.exists(path)) {
    if (!fs.delete(path, true)) {
      throw new IOException("Failed delete of " + path);
    }
  }
  HRegion region = HRegion.createHRegion(hri, path, conf, htd);
  try {
    Increment odd = new Increment(rows[0]);
    odd.setDurability(Durability.SKIP_WAL);
    Increment even = new Increment(rows[0]);
    even.setDurability(Durability.SKIP_WAL);
    Increment all = new Increment(rows[0]);
    all.setDurability(Durability.SKIP_WAL);
    for (int i=0;i<numQualifiers;i++) {
      if (i % 2 == 0) even.addColumn(families[0], qualifiers[i], 1);
      else odd.addColumn(families[0], qualifiers[i], 1);
      all.addColumn(families[0], qualifiers[i], 1);
    }

    // increment odd qualifiers 5 times and flush
    for (int i=0;i<5;i++) region.increment(odd, HConstants.NO_NONCE, HConstants.NO_NONCE);
    region.flush(true);

    // increment even qualifiers 5 times
    for (int i=0;i<5;i++) region.increment(even, HConstants.NO_NONCE, HConstants.NO_NONCE);

    // increment all qualifiers, should have value=6 for all
    Result result = region.increment(all, HConstants.NO_NONCE, HConstants.NO_NONCE);
    assertEquals(numQualifiers, result.size());
    Cell [] kvs = result.rawCells();
    for (int i=0;i<kvs.length;i++) {
      System.out.println(kvs[i].toString());
      assertTrue(CellUtil.matchingQualifier(kvs[i], qualifiers[i]));
      assertEquals(6, Bytes.toLong(CellUtil.cloneValue(kvs[i])));
    }
  } finally {
    HRegion.closeHRegion(region);
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 66, Source: TestResettingCounters.java


Note: The org.apache.hadoop.hbase.client.Result.rawCells examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from community open-source projects, and copyright of the source code remains with the original authors; consult each project's License before redistributing or using the code. Do not reproduce without permission.