

Java Result.getRow Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.Result.getRow from open-source code. If you are wondering what Result.getRow does, how to call it, or where to find it used in context, the curated examples below should help. You can also explore further usage examples of org.apache.hadoop.hbase.client.Result itself.


The 15 code examples of Result.getRow below are ordered by popularity.
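Before the examples, a note on the method itself: Result.getRow() returns the row key of a Result as a byte array, and returns null for an empty Result (Example 5 below relies on this to detect a missing row). A minimal, hypothetical sketch, where the table handle and the row key are placeholders rather than code taken from the examples:

Get get = new Get(Bytes.toBytes("row-001")); // "row-001" is a made-up key
Result result = table.get(get);              // 'table' is a pre-existing HTable handle
byte[] rowKey = result.getRow();             // null if the row does not exist
if (rowKey != null) {
    System.out.println("found row: " + Bytes.toString(rowKey));
}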

Example 1: getAll

import org.apache.hadoop.hbase.client.Result; // the package/class this method depends on
/**
 * Fetch all rows of a single table, reading one column per row.
 *
 * @throws IOException
 */
public static Map<byte[], byte[]> getAll(String tableName, String columnFamily, String columnName)
		throws IOException {
	// byte[] has no value-based equals/hashCode, so a HashMap keyed on byte[] is
	// unusable for lookups; a TreeMap with HBase's byte comparator works correctly
	Map<byte[], byte[]> tableContent = new TreeMap<byte[], byte[]>(Bytes.BYTES_COMPARATOR);
	Scan s = new Scan();
	s.addColumn(Bytes.toBytes(columnFamily), Bytes.toBytes(columnName));
	s.setMaxVersions(1);
	s.setCacheBlocks(false);
	// note: the tableName parameter is unused; the scan runs against the class's hbase_table field
	ResultScanner rs = hbase_table.getScanner(s);
	for (Result r : rs) {
		byte[] key = r.getRow();
		byte[] value = r.getValue(Bytes.toBytes(columnFamily), Bytes.toBytes(columnName));
		tableContent.put(key, value);
	}
	rs.close();
	return tableContent;
}
 
Developer: ItGql, Project: SparkIsax, Lines: 22, Source: HBaseUtils.java
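A hypothetical invocation of the helper above (the table, family, and column names are made up for illustration; as noted in the comment, the scan runs against the class's hbase_table field, so the tableName argument has no effect):

Map<byte[], byte[]> content = HBaseUtils.getAll("metrics", "cf", "value");
for (Map.Entry<byte[], byte[]> e : content.entrySet()) {
    System.out.println(Bytes.toString(e.getKey()) + " = " + Bytes.toString(e.getValue()));
}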

Example 2: parseResult

import org.apache.hadoop.hbase.client.Result; // the package/class this method depends on
@Override public String parseResult(Result result) {
  byte[] rowkey = result.getRow();
  // the 8-byte order key is stored at offset 4 of the row key
  long orderKey = Bytes.toLong(rowkey, 4);
  long custKey = Bytes.toLong(result.getValue(FAMILY_NAME, Bytes.toBytes("ck")));
  String status = Bytes.toString(result.getValue(FAMILY_NAME, Bytes.toBytes("st")));
  int totalPrice = Bytes.toInt(result.getValue(FAMILY_NAME, Bytes.toBytes("t")));
  int date = Bytes.toInt(result.getValue(FAMILY_NAME, Bytes.toBytes("d")));
  int priority = Bytes.toInt(result.getValue(FAMILY_NAME, Bytes.toBytes("p")));
  String clerk = Bytes.toString(result.getValue(FAMILY_NAME, Bytes.toBytes("cl")));
  int shipPriority = Bytes.toInt(result.getValue(FAMILY_NAME, Bytes.toBytes("sh")));
  String comment = Bytes.toString(result.getValue(FAMILY_NAME, Bytes.toBytes("cm")));
  StringBuilder sb = new StringBuilder();
  sb.append("{");
  sb.append("orderKey=").append(orderKey).append(",");
  sb.append("custKey=").append(custKey).append(",");
  sb.append("status=").append(status).append(",");
  sb.append("totalPrice=").append(totalPrice).append(",");
  sb.append("date=").append(date).append(",");
  sb.append("priority=").append(priority).append(",");
  sb.append("clerk=").append(clerk).append(",");
  sb.append("shipPriority=").append(shipPriority).append(",");
  sb.append("comment=").append(comment).append("}");
  return sb.toString();
}
 
Developer: fengchen8086, Project: ditb, Lines: 26, Source: TPCHWorkload.java

Example 3: mapRow

import org.apache.hadoop.hbase.client.Result; // the package/class this method depends on
@Override
public TraceAnnotationDto mapRow(Result res, int rowNum) throws Exception {
    String rowKey = new String(res.getRow());
    NavigableMap<byte[], byte[]> familyMap = res.getFamilyMap(TABLE_ANNOTATION_COLUMN_FAMILY.getBytes());

    // row key layout: iface_method_type_timestamp
    String[] ss = rowKey.split("_");
    String iface = ss[0];
    String method = ss[1];
    String type = ss[2];
    long timestamp = Long.parseLong(ss[3]);

    // the family is expected to hold a single qualifier/value pair (traceId -> value);
    // if several are present, the last entry wins
    String traceId = "", value = "";
    for (Map.Entry<byte[], byte[]> entry : familyMap.entrySet()) {
        traceId = new String(entry.getKey());
        value = new String(entry.getValue());
    }

    TraceAnnotationDto tad = new TraceAnnotationDto();
    tad.setRowKey(rowKey).setIface(iface).setMethod(method).setType(type).setTimestamp(timestamp);
    tad.setTraceId(traceId).setValue(value);
    return tad;
}
 
Developer: JThink, Project: SkyEye, Lines: 24, Source: TraceAnnotationDto.java

Example 4: mapRow

import org.apache.hadoop.hbase.client.Result; // the package/class this method depends on
@Override
public TraceTimeConsumeDto mapRow(Result res, int rowNum) throws Exception {
    TraceTimeConsumeDto dto = new TraceTimeConsumeDto();
    Map<byte[], byte[]> familyMap = res.getFamilyMap(Constants.TABLE_TIME_CONSUME_COLUMN_FAMILY.getBytes());
    // the family is expected to hold a single entry: traceId -> consumed time
    for (Map.Entry<byte[], byte[]> en : familyMap.entrySet()) {
        dto.setTraceId(new String(en.getKey())).setConsumeTime(RadixUtil.bytesToLong(en.getValue()));
    }
    // row key layout: iface_method_startTime; decode the row key once and reuse it
    String rowKey = new String(res.getRow());
    String[] ss = rowKey.split(Constants.UNDER_LINE);
    String iface = ss[0];
    String method = ss[1];
    Long startTime = Long.parseLong(ss[2]);
    Long endTime = startTime + dto.getConsumeTime();

    dto.setIface(iface).setMethod(method).setStartTime(startTime).setEndTime(endTime).setRowKey(rowKey);
    return dto;
}
 
Developer: JThink, Project: SkyEye, Lines: 19, Source: TraceTimeConsumeRowMapper.java

Example 5: doAnAction

import org.apache.hadoop.hbase.client.Result; // the package/class this method depends on
public void doAnAction() throws Exception {
  Get g = new Get(targetRow);
  Result res = table.get(g);
  byte[] gotValue = null;
  if (res.getRow() == null) {
    // Trying to verify but we didn't find the row - the writing
    // thread probably just hasn't started writing yet, so we can
    // ignore this action
    return;
  }

  for (byte[] family : targetFamilies) {
    for (int i = 0; i < NUM_COLS_TO_CHECK; i++) {
      byte[] qualifier = Bytes.toBytes("col" + i);
      byte[] thisValue = res.getValue(family, qualifier);
      if (gotValue != null && !Bytes.equals(gotValue, thisValue)) {
        gotFailure(gotValue, res);
      }
      numVerified++;
      gotValue = thisValue;
    }
  }
  numRead.getAndIncrement();
}
 
Developer: fengchen8086, Project: ditb, Lines: 25, Source: TestAcidGuarantees.java

Example 6: createModelFromResults

import org.apache.hadoop.hbase.client.Result; // the package/class this method depends on
private CellSetModel createModelFromResults(Result[] results) {
  CellSetModel cellSetModel = new CellSetModel();
  for (Result rs : results) {
    byte[] rowKey = rs.getRow();
    RowModel rModel = new RowModel(rowKey);
    List<Cell> kvs = rs.listCells();
    for (Cell kv : kvs) {
      rModel.addCell(new CellModel(CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv),
          kv.getTimestamp(), CellUtil.cloneValue(kv)));
    }
    cellSetModel.addRow(rModel);
  }
  return cellSetModel;
}
 
Developer: fengchen8086, Project: ditb, Lines: 15, Source: ProtobufStreamingUtil.java

Example 7: mapRow

import org.apache.hadoop.hbase.client.Result; // the package/class this method depends on
@Override
public BaseDataPoint mapRow(Result result, int rowNum) throws Exception {
    if (result.isEmpty()) {
        return null;
    }
    final byte[] distributedRowKey = result.getRow();
    EasyHBaseBo bo = new EasyHBaseBo();
    for (Cell cell : result.rawCells()) {
        if (CellUtil.matchingFamily(cell, HBaseTables.EASYHBASE_CF)) {
            // every cell in this Result shares the row key returned by getRow();
            // Cell.getRow()/getValue() are deprecated (removed in HBase 2.0), so
            // use the Result's row key and CellUtil.cloneValue instead
            bo.setRowkey(Bytes.toString(distributedRowKey));
            bo.setValue(Bytes.toString(CellUtil.cloneValue(cell)));
            bo.setTimestamp(cell.getTimestamp());
        }
    }
    return bo;
}
 
Developer: fchenxi, Project: easyhbase, Lines: 30, Source: EasyHBaseMapperV2.java

Example 8: testExpectedNumberOfCellsPerPartialResult

import org.apache.hadoop.hbase.client.Result; // the package/class this method depends on
public void testExpectedNumberOfCellsPerPartialResult(Scan baseScan, int expectedNumberOfCells)
    throws Exception {

  if (LOG.isInfoEnabled()) LOG.info("groupSize:" + expectedNumberOfCells);

  // Use the cellHeapSize to set maxResultSize such that we know how many cells to expect back
  // from the call. The returned results should NOT exceed expectedNumberOfCells but may be less
  // than it in cases where expectedNumberOfCells is not an exact multiple of the number of
  // columns in the table.
  Scan scan = new Scan(baseScan);
  scan.setAllowPartialResults(true);
  scan.setMaxResultSize(getResultSizeForNumberOfCells(expectedNumberOfCells));

  ResultScanner scanner = TABLE.getScanner(scan);
  Result result = null;
  byte[] prevRow = null;
  while ((result = scanner.next()) != null) {
    assertTrue(result.rawCells() != null);

    // Cases when cell count won't equal expectedNumberOfCells:
    // 1. Returned result is the final result needed to form the complete result for that row
    // 2. It is the first result we have seen for that row and thus may have been fetched as
    // the last group of cells that fit inside the maxResultSize
    assertTrue(
        "Result's cell count differed from expected number. result: " + result,
        result.rawCells().length == expectedNumberOfCells || !result.isPartial()
            || !Bytes.equals(prevRow, result.getRow()));
    prevRow = result.getRow();
  }

  scanner.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 33, Source: TestPartialResultsFromClientSide.java

Example 9: notifyInsertion

import org.apache.hadoop.hbase.client.Result; // the package/class this method depends on
/**
 * Bumps the size counter of the bucket that contains the given row.
 *
 * @param row      the row that was just inserted
 * @param newAdded the number of entries added to the bucket
 * @throws IOException
 */
void notifyInsertion(byte[] row, long newAdded) throws IOException {
  Result bucketEntry = getRowOrBefore(bucketTable, row, MDHBaseAdmin.BUCKET_FAMILY);
  byte[] bucketKey = bucketEntry.getRow();
  long startTime = System.currentTimeMillis();
  long size = bucketTable.incrementColumnValue(bucketKey, MDHBaseAdmin.BUCKET_FAMILY,
      MDHBaseAdmin.BUCKET_SIZE_QUALIFIER, newAdded);
  incBucketTotalCount++;
  incBucketTotalTime += System.currentTimeMillis() - startTime;
  maySplit(bucketKey, size);
}
 
Developer: fengchen8086, Project: ditb, Lines: 15, Source: MDIndex.java
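The getRowOrBefore helper used here (and again in Examples 13-15) is not shown in these snippets. A plausible sketch, assuming it uses the same reversed small-scan trick that Example 11 applies inline:

private Result getRowOrBefore(HTable table, byte[] row, byte[] family) throws IOException {
  // a reversed scan starting at 'row' returns, as its first result,
  // the row itself or the closest row before it
  Scan scan = new Scan();
  scan.addFamily(family);
  scan.setReversed(true);
  scan.setStartRow(row);
  scan.setCacheBlocks(false);
  scan.setCaching(1);
  scan.setSmall(true);
  ResultScanner scanner = table.getScanner(scan);
  try {
    return scanner.next(); // may be null if no row at or before 'row' exists
  } finally {
    scanner.close();
  }
}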

Example 10: map

import org.apache.hadoop.hbase.client.Result; // the package/class this method depends on
@Override
protected void map(ImmutableBytesWritable key, Result value, Context context)
    throws IOException, InterruptedException {
  byte[] row = value.getRow();
  Counter c = getCounter(row);
  c.increment(1);
}
 
Developer: fengchen8086, Project: ditb, Lines: 8, Source: IntegrationTestWithCellVisibilityLoadAndVerify.java

Example 11: insertOneRecord

import org.apache.hadoop.hbase.client.Result; // the package/class this method depends on
@Override protected void insertOneRecord(AbstractDITBRecord record) throws IOException {
  // note: MD-HBase does a get before each update; the time spent in the get is tracked separately
  MDPoint point = record.toMDPoint();
  byte[] row = MDUtils.bitwiseZip(point.values, mdAdmin.getDimensions());
  // locate the bucket row at or before 'row' via a reversed, small scan
  long startTime = System.currentTimeMillis();
  Scan scan = new Scan();
  scan.addFamily(MDHBaseAdmin.BUCKET_FAMILY);
  scan.setReversed(true);
  scan.setStartRow(row);
  scan.setCacheBlocks(false);
  scan.setCaching(1);
  scan.setSmall(true);
  ResultScanner scanner = table.getScanner(scan);
  Result result = scanner.next();
  scanner.close();
  gbuTime += System.currentTimeMillis() - startTime;
  gbuCount++;
  // default scan
  if (result == null) {
    row = mdAdmin.getBucketSuffixRow(point);
  } else {
    row = result.getRow();
  }
  table.incrementColumnValue(row, MDHBaseAdmin.BUCKET_FAMILY,
      MDHBaseAdmin.BUCKET_SIZE_QUALIFIER, 1);
}
 
Developer: fengchen8086, Project: ditb, Lines: 28, Source: PerfMD.java

Example 12: deleteTableData

import org.apache.hadoop.hbase.client.Result; // the package/class this method depends on
/**
 * Truncates an existing table: scans it and issues a delete for each row read.
 * @param tableName existing table
 * @return HTable handle to the (now empty) table
 * @throws IOException
 */
public HTable deleteTableData(TableName tableName) throws IOException {
  HTable table = new HTable(getConfiguration(), tableName);
  Scan scan = new Scan();
  ResultScanner resScan = table.getScanner(scan);
  for(Result res : resScan) {
    Delete del = new Delete(res.getRow());
    table.delete(del);
  }
  resScan.close();
  return table;
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: HBaseTestingUtility.java
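Issuing one delete RPC per row, as above, is correct but slow on large tables. A sketch of a batched variant under the same client API, with an arbitrary flush threshold of 1,000 deletes:

ResultScanner scanner = table.getScanner(new Scan());
try {
  List<Delete> batch = new ArrayList<Delete>();
  for (Result res : scanner) {
    batch.add(new Delete(res.getRow()));
    if (batch.size() >= 1000) {      // arbitrary flush threshold
      table.delete(batch);           // HTable.delete(List) removes applied deletes from the list
    }
  }
  if (!batch.isEmpty()) {
    table.delete(batch);             // flush the remainder
  }
} finally {
  scanner.close();
}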

Example 13: splitBucket

import org.apache.hadoop.hbase.client.Result; // the package/class this method depends on
private void splitBucket(byte[] splitKey) throws IOException {
  Result bucketEntry = getRowOrBefore(bucketTable, splitKey, MDHBaseAdmin.BUCKET_FAMILY);
  byte[] bucketKey = bucketEntry.getRow();
  int prefixLength = Bytes.toInt(
      bucketEntry.getValue(MDHBaseAdmin.BUCKET_FAMILY, MDHBaseAdmin.BUCKET_PREFIX_LEN_QUALIFIER));
  long bucketSize = Bytes.toLong(
      bucketEntry.getValue(MDHBaseAdmin.BUCKET_FAMILY, MDHBaseAdmin.BUCKET_SIZE_QUALIFIER));
  int newPrefixLength = prefixLength + 1;
  if (newPrefixLength > 32 * 2) {
    return; // exceeds the maximum prefix length.
  }
  // child 0 keeps the parent's key; child 1 has the bit at 'prefixLength' set
  byte[] newChildKey0 = bucketKey;
  byte[] newChildKey1 = MDUtils.makeBit(bucketKey, prefixLength, dimensions);
  // count the secondary-index entries that fall into child 0's range [newChildKey0, newChildKey1)
  Scan scan = new Scan(newChildKey0, newChildKey1);
  scan.addFamily(MDHBaseAdmin.SECONDARY_FAMILY);
  scan.setCaching(1000);
  ResultScanner scanner = secondaryTable.getScanner(scan);
  long newSize = 0L;
  for (Result result : scanner) {
    newSize += result.getFamilyMap(MDHBaseAdmin.SECONDARY_FAMILY).size();
  }
  splitTimes++;
  scanner.close();
  Put put0 = new Put(newChildKey0);
  put0.addColumn(MDHBaseAdmin.BUCKET_FAMILY, MDHBaseAdmin.BUCKET_PREFIX_LEN_QUALIFIER,
      Bytes.toBytes(newPrefixLength));
  put0.addColumn(MDHBaseAdmin.BUCKET_FAMILY, MDHBaseAdmin.BUCKET_SIZE_QUALIFIER,
      Bytes.toBytes(newSize));
  Put put1 = new Put(newChildKey1);
  put1.addColumn(MDHBaseAdmin.BUCKET_FAMILY, MDHBaseAdmin.BUCKET_PREFIX_LEN_QUALIFIER,
      Bytes.toBytes(newPrefixLength));
  put1.addColumn(MDHBaseAdmin.BUCKET_FAMILY, MDHBaseAdmin.BUCKET_SIZE_QUALIFIER,
      Bytes.toBytes(bucketSize - newSize));
  List<Put> puts = new ArrayList<>(2);
  puts.add(put0);
  puts.add(put1);
  bucketTable.put(puts);
  maySplit(newChildKey0, newSize);
  maySplit(newChildKey1, bucketSize - newSize);
}
 
Developer: fengchen8086, Project: ditb, Lines: 41, Source: MDIndex.java

Example 14: fetchBucket

import org.apache.hadoop.hbase.client.Result; // the package/class this method depends on
/**
 * fetches a bucket which holds the queried row.
 *
 * @param row a queried row key
 * @return a bucket which holds the queried row.
 * @throws IOException
 */
public MDBucket fetchBucket(byte[] row) throws IOException {
  Result bucketEntry = getRowOrBefore(bucketTable, row, MDHBaseAdmin.BUCKET_FAMILY);
  byte[] bucketKey = bucketEntry.getRow();
  // prefix length has been written while creating table, defined to #dimension
  int prefixLength = Bytes.toInt(
      bucketEntry.getValue(MDHBaseAdmin.BUCKET_FAMILY, MDHBaseAdmin.BUCKET_PREFIX_LEN_QUALIFIER));
  MDRange[] ranges = toRanges(bucketKey, prefixLength);
  return createBucket(ranges);
}
 
Developer: fengchen8086, Project: ditb, Lines: 17, Source: MDIndex.java

Example 15: findBucketsInRange

import org.apache.hadoop.hbase.client.Result; // the package/class this method depends on
/**
 * Finds the buckets that intersect the query region.
 *
 * @param ranges    one query range per dimension
 * @param cacheSize scanner caching hint
 * @return the buckets whose key ranges intersect the query region
 * @throws IOException
 */
public Iterable<MDBucket> findBucketsInRange(MDRange[] ranges, int cacheSize) throws IOException {
  int[] mins = new int[ranges.length];
  int[] maxs = new int[ranges.length];
  for (int i = 0; i < ranges.length; i++) {
    mins[i] = ranges[i].min;
    maxs[i] = ranges[i].max;
  }
  byte[] probeKey = MDUtils.bitwiseZip(mins, dimensions);
  Result bucketEntry = getRowOrBefore(bucketTable, probeKey, MDHBaseAdmin.BUCKET_FAMILY);
  byte[] startKey = bucketEntry.getRow();
  byte[] stopKey = MDUtils.increment(MDUtils.bitwiseZip(maxs, dimensions));
  Scan scan = new Scan(startKey, stopKey);
  scan.addFamily(MDHBaseAdmin.BUCKET_FAMILY);
  scan.setCaching(cacheSize);
  ResultScanner scanner = bucketTable.getScanner(scan);
  List<MDBucket> hitBuckets = new LinkedList<>();
  long startTime = System.currentTimeMillis();
  int counter = 0;
  for (Result result : scanner) {
    ++counter;
    byte[] row = result.getRow();
    int pl = Bytes.toInt(
        result.getValue(MDHBaseAdmin.BUCKET_FAMILY, MDHBaseAdmin.BUCKET_PREFIX_LEN_QUALIFIER));
    MDRange[] rs = toRanges(row, pl);
    boolean intersect = true;
    for (int i = 0; i < rs.length; i++) {
      if (!ranges[i].intersect(rs[i])) {
        intersect = false;
        break;
      }
    }
    if (intersect) {
      hitBuckets.add(createBucket(rs));
    }
  }
  lastScanTotalNumber = counter;
  lastScanTotalTime = System.currentTimeMillis() - startTime;
  scanner.close();
  return hitBuckets;
}
 
Developer: fengchen8086, Project: ditb, Lines: 49, Source: MDIndex.java


Note: The org.apache.hadoop.hbase.client.Result.getRow examples in this article were compiled from open-source code hosted on GitHub, MSDocs, and similar platforms. The snippets are taken from open-source projects contributed by their authors; copyright remains with the original authors. Consult each project's license before using or redistributing the code; do not reproduce without permission.