This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.Result.listCells. If you have been wondering what Result.listCells does, how to use it, or where to find examples of it, the curated samples below may help. They are also a good starting point for exploring the enclosing class, org.apache.hadoop.hbase.client.Result.
The sections below show 15 code examples of Result.listCells, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
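Before diving into the collected examples, here is a minimal sketch of the method itself: listCells() returns the row's cells as a List<Cell>, or null when the Result is empty. The table name "mytable", row key "row1", and the surrounding method are placeholders, not taken from any of the examples below.

private void printRow(Configuration conf) throws IOException {
  try (Connection connection = ConnectionFactory.createConnection(conf);
       Table table = connection.getTable(TableName.valueOf("mytable"))) {
    Result result = table.get(new Get(Bytes.toBytes("row1")));
    List<Cell> cells = result.listCells(); // null when the Result is empty
    if (cells != null) {
      for (Cell cell : cells) {
        System.out.println(Bytes.toString(CellUtil.cloneQualifier(cell)) + " = "
            + Bytes.toString(CellUtil.cloneValue(cell)));
      }
    }
  }
}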
Example 1: printResult
import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
private void printResult(Result result) {
long rowkey = Bytes.toInt(result.getRow());
// int a = Bytes.toInt(result.getValue(familyName, Bytes.toBytes("a")));
// int b = Bytes.toInt(result.getValue(familyName, Bytes.toBytes("b")));
// int c = Bytes.toInt(result.getValue(familyName, Bytes.toBytes("c")));
// int info = Bytes.toInt(result.getValue(familyName, Bytes.toBytes("info")));
StringBuilder sb = new StringBuilder();
sb.append("{");
sb.append("rowkey=").append(rowkey).append(",");
for (Cell cell : result.listCells()) {
sb.append(Bytes.toString(CellUtil.cloneQualifier(cell))).append("=")
.append(Bytes.toInt(CellUtil.cloneValue(cell))).append(",");
}
// sb.append("a=").append(a).append(",");
// sb.append("b=").append(b).append(",");
// sb.append("c=").append(c).append(",");
// sb.append("info=").append(info).append(",");
sb.append("}");
System.out.println(sb.toString());
}
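A hypothetical call site for printResult, assuming a Table handle named table and a 4-byte integer row key (both placeholders). Note that listCells() returns null for an empty Result, so the isEmpty() guard protects the loop above from a NullPointerException:

Result result = table.get(new Get(Bytes.toBytes(42)));
if (!result.isEmpty()) {
  printResult(result);
}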
Example 2: extractKeyValues
import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
/**
* Extract column values from the current record. This method returns
* null if any of the columns are not found.
* <p>
* Override this method if you want to deal with nulls differently.
*
* @param r The current values.
* @return Array of byte values.
*/
protected byte[][] extractKeyValues(Result r) {
byte[][] keyVals = null;
ArrayList<byte[]> foundList = new ArrayList<byte[]>();
int numCols = columns.length;
if (numCols > 0) {
for (Cell value: r.listCells()) {
byte [] column = KeyValue.makeColumn(CellUtil.cloneFamily(value),
CellUtil.cloneQualifier(value));
for (int i = 0; i < numCols; i++) {
if (Bytes.equals(column, columns[i])) {
foundList.add(CellUtil.cloneValue(value));
break;
}
}
}
if(foundList.size() == numCols) {
keyVals = foundList.toArray(new byte[numCols][]);
}
}
return keyVals;
}
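The javadoc above suggests overriding extractKeyValues to handle missing columns differently. Here is a hypothetical override that substitutes an empty value for absent columns instead of dropping the whole row; this is a sketch, not part of the original source (HConstants.EMPTY_BYTE_ARRAY is the stock HBase empty array, and columns is the field from the enclosing class):

@Override
protected byte[][] extractKeyValues(Result r) {
  byte[][] keyVals = new byte[columns.length][];
  for (int i = 0; i < keyVals.length; i++) {
    keyVals[i] = HConstants.EMPTY_BYTE_ARRAY; // default when a column is absent
  }
  List<Cell> cells = r.listCells();
  if (cells == null) {
    return keyVals; // empty row: every column falls back to the empty value
  }
  for (Cell cell : cells) {
    byte[] column = KeyValue.makeColumn(CellUtil.cloneFamily(cell),
        CellUtil.cloneQualifier(cell));
    for (int i = 0; i < columns.length; i++) {
      if (Bytes.equals(column, columns[i])) {
        keyVals[i] = CellUtil.cloneValue(cell);
        break;
      }
    }
  }
  return keyVals;
}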
Example 3: extractKeyValues
import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
/**
* Extract column values from the current record. This method returns
* null if any of the columns are not found.
* <p>
* Override this method if you want to deal with nulls differently.
*
* @param r The current values.
* @return Array of byte values.
*/
protected byte[][] extractKeyValues(Result r) {
byte[][] keyVals = null;
ArrayList<byte[]> foundList = new ArrayList<byte[]>();
int numCols = columns.length;
if (numCols > 0) {
for (Cell value: r.listCells()) {
byte [] column = KeyValue.makeColumn(CellUtil.cloneFamily(value),
CellUtil.cloneQualifier(value));
for (int i = 0; i < numCols; i++) {
if (Bytes.equals(column, columns[i])) {
foundList.add(CellUtil.cloneValue(value));
break;
}
}
}
if(foundList.size() == numCols) {
keyVals = foundList.toArray(new byte[numCols][]);
}
}
return keyVals;
}
Example 4: map
import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
@Override
public void map(ImmutableBytesWritable key, Result value,
OutputCollector<NullWritable,NullWritable> output,
Reporter reporter) throws IOException {
for (Cell cell : value.listCells()) {
reporter.getCounter(TestTableInputFormat.class.getName() + ":row",
Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()))
.increment(1l);
reporter.getCounter(TestTableInputFormat.class.getName() + ":family",
Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()))
.increment(1l);
reporter.getCounter(TestTableInputFormat.class.getName() + ":value",
Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()))
.increment(1l);
}
}
Example 5: map
import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
@Override
public void map(ImmutableBytesWritable key, Result result,
Context context)
throws IOException {
List<Long> tsList = new ArrayList<Long>();
for (Cell kv : result.listCells()) {
tsList.add(kv.getTimestamp());
}
List<Put> puts = new ArrayList<>();
for (Long ts : tsList) {
Put put = new Put(key.get());
put.setDurability(Durability.SKIP_WAL);
put.addColumn(FAMILY_NAME, COLUMN_NAME, ts, Bytes.toBytes(true));
puts.add(put);
}
table.put(puts);
}
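A note on the Durability.SKIP_WAL call above: it skips the write-ahead log for each Put, which speeds up this bulk timestamp rewrite at the cost of losing the writes if a region server crashes before the memstore is flushed.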
Example 6: runScanner
import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
private void runScanner(Table hTable, int expectedSize, Filter... filters) throws IOException {
String cf = "f";
Scan scan = new Scan();
scan.addFamily(Bytes.toBytes(cf));
FilterList filterList = new FilterList(filters);
scan.setFilter(filterList);
ResultScanner scanner = hTable.getScanner(scan);
List<Cell> results = new ArrayList<Cell>();
Result result;
long timeBeforeScan = System.currentTimeMillis();
while ((result = scanner.next()) != null) {
for (Cell kv : result.listCells()) {
LOG.info("Got rk: " + Bytes.toStringBinary(CellUtil.cloneRow(kv)) + " cq: "
+ Bytes.toStringBinary(CellUtil.cloneQualifier(kv)));
results.add(kv);
}
}
long scanTime = System.currentTimeMillis() - timeBeforeScan;
scanner.close();
LOG.info("scan time = " + scanTime + "ms");
LOG.info("found " + results.size() + " results");
assertEquals(expectedSize, results.size());
}
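A hypothetical invocation of the helper above, assuming a Table handle named table; the two filters and the expected count of 10 are placeholders, not values from the real tests:

Filter rowPrefix = new PrefixFilter(Bytes.toBytes("row"));
Filter colPrefix = new ColumnPrefixFilter(Bytes.toBytes("c"));
runScanner(table, 10, rowPrefix, colPrefix);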
Example 7: getScanResult
import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
private List<Cell> getScanResult(byte[] startRow, byte[] stopRow, HTable ht) throws IOException {
Scan scan = new Scan();
scan.setMaxVersions();
if(!Bytes.toString(startRow).isEmpty()) {
scan.setStartRow(startRow);
}
if(!Bytes.toString(stopRow).isEmpty()) {
scan.setStopRow(stopRow);
}
ResultScanner scanner = ht.getScanner(scan);
List<Cell> kvList = new ArrayList<Cell>();
Result r;
while ((r = scanner.next()) != null) {
for (Cell kv : r.listCells()) {
kvList.add(kv);
}
}
return kvList;
}
Example 8: resultToString
import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
protected String resultToString(Result result) {
StringBuilder sb = new StringBuilder();
sb.append("{").append(keyToString(result.getRow())).append(":");
for (Cell cell : result.listCells()) {
byte[] f = CellUtil.cloneFamily(cell);
byte[] q = CellUtil.cloneQualifier(cell);
RangeDescription range = rangeMap.get(Bytes.add(f, q));
sb.append("[").append(Bytes.toString(f)).append(":").append(Bytes.toString(q)).append("->");
if (notPrintingSet.contains(q)) sb.append("skipped random value");
else sb.append(DataType.byteToString(range.dataType, CellUtil.cloneValue(cell)));
sb.append("]");
}
sb.append("}");
return sb.toString();
}
Example 9: map
import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
@Override
protected void map(ImmutableBytesWritable key, Result value, Context context)
throws IOException, InterruptedException {
BytesWritable bwKey = new BytesWritable(key.get());
BytesWritable bwVal = new BytesWritable();
for (Cell kv : value.listCells()) {
if (Bytes.compareTo(TEST_QUALIFIER, 0, TEST_QUALIFIER.length,
kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength()) == 0) {
context.write(bwKey, EMPTY);
} else {
bwVal.set(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength());
context.write(bwVal, bwKey);
}
}
}
Example 10: gotFailure
import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
private void gotFailure(byte[] expected, Result res) {
StringBuilder msg = new StringBuilder();
msg.append("Failed after ").append(numVerified).append("!");
msg.append("Expected=").append(Bytes.toStringBinary(expected));
msg.append("Got:\n");
for (Cell kv : res.listCells()) {
msg.append(kv.toString());
msg.append(" val= ");
msg.append(Bytes.toStringBinary(CellUtil.cloneValue(kv)));
msg.append("\n");
}
throw new RuntimeException(msg.toString());
}
Example 11: createModelFromResults
import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
private CellSetModel createModelFromResults(Result[] results) {
CellSetModel cellSetModel = new CellSetModel();
for (Result rs : results) {
byte[] rowKey = rs.getRow();
RowModel rModel = new RowModel(rowKey);
List<Cell> kvs = rs.listCells();
for (Cell kv : kvs) {
rModel.addCell(new CellModel(CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv), kv
.getTimestamp(), CellUtil.cloneValue(kv)));
}
cellSetModel.addRow(rModel);
}
return cellSetModel;
}
Example 12: runScanner
import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
private void runScanner(Table hTable, int expectedSize, Filter filter1, Filter filter2)
throws IOException {
String cf = "f";
Scan scan = new Scan();
scan.addFamily(Bytes.toBytes(cf));
FilterList filterList = new FilterList(Operator.MUST_PASS_ALL, filter1, filter2);
scan.setFilter(filterList);
ResultScanner scanner = hTable.getScanner(scan);
List<Cell> results = new ArrayList<Cell>();
Result result;
long timeBeforeScan = System.currentTimeMillis();
while ((result = scanner.next()) != null) {
for (Cell kv : result.listCells()) {
LOG.info("Got rk: " + Bytes.toStringBinary(CellUtil.cloneRow(kv)) + " cq: "
+ Bytes.toStringBinary(CellUtil.cloneQualifier(kv)));
results.add(kv);
}
}
long scanTime = System.currentTimeMillis() - timeBeforeScan;
scanner.close();
LOG.info("scan time = " + scanTime + "ms");
LOG.info("found " + results.size() + " results");
assertEquals(expectedSize, results.size());
}
Example 13: getResultsSize
import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
private int getResultsSize(HTable ht, Scan scan) throws IOException {
ResultScanner scanner = ht.getScanner(scan);
List<Cell> results = new ArrayList<Cell>();
Result r;
while ((r = scanner.next()) != null) {
for (Cell kv : r.listCells()) {
results.add(kv);
}
}
return results.size();
}
Example 14: testScanWithLimit
import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
@Test
public void testScanWithLimit() {
int kv_number = 0;
try {
Scan scan = new Scan();
// set batch size to 2, which means each Result should contain at most 2 KVs
scan.setBatch(2);
SingleColumnValueFilter filter = new SingleColumnValueFilter(
Bytes.toBytes(columnFamily), Bytes.toBytes("c5"),
CompareFilter.CompareOp.EQUAL, new SubstringComparator("2_c5"));
// add the filter after the batch size has been set
scan.setFilter(filter);
Table table = openTable(tableName);
ResultScanner scanner = table.getScanner(scan);
// Expect to get the following rows:
// row2 => <f1:c1, 2_c1>, <f1:c2, 2_c2>,
// row2 => <f1:c3, 2_c3>, <f1:c4, 2_c4>,
// row2 => <f1:c5, 2_c5>
for (Result result : scanner) {
for (Cell kv : result.listCells()) {
kv_number++;
LOG.debug(kv_number + ". kv: " + kv);
}
}
scanner.close();
table.close();
} catch (Exception e) {
// the scan is expected to fail rather than return results
assertNotNull("No IncompatibleFilterException caught", e);
}
LOG.debug("check the fetched kv number");
assertEquals("We should not get result(s) returned.", 0, kv_number);
}
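Why the test expects zero cells: Scan.setBatch(int) caps how many cells a single Result may carry, but it is incompatible with filters that must see a whole row at a time. SingleColumnValueFilter is one of them (its hasFilterRow() returns true), so the scan fails with an IncompatibleFilterException before any cell is returned.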
Example 15: map
import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
/**
* Maps the data.
*
* @param row The current table row key.
* @param values The columns.
* @param context The current context.
* @throws IOException When something is broken with the data.
* @see org.apache.hadoop.mapreduce.Mapper#map(KEYIN, VALUEIN,
* org.apache.hadoop.mapreduce.Mapper.Context)
*/
@Override
public void map(ImmutableBytesWritable row, Result values,
Context context)
throws IOException {
Preconditions.checkState(values != null,
"values passed to the map is null");
String currentFamilyName = null;
String currentQualifierName = null;
String currentRowKey = null;
Configuration config = context.getConfiguration();
String separator = config.get("ReportSeparator", ":");
try {
context.getCounter(Counters.ROWS).increment(1);
context.write(new Text("Total ROWS"), new IntWritable(1));
for (Cell value : values.listCells()) {
currentRowKey = Bytes.toStringBinary(CellUtil.cloneRow(value));
String thisRowFamilyName = Bytes.toStringBinary(CellUtil.cloneFamily(value));
if (!thisRowFamilyName.equals(currentFamilyName)) {
currentFamilyName = thisRowFamilyName;
context.getCounter("CF", thisRowFamilyName).increment(1);
if (1 == context.getCounter("CF", thisRowFamilyName).getValue()) {
context.write(new Text("Total Families Across all Rows"), new IntWritable(1));
context.write(new Text(thisRowFamilyName), new IntWritable(1));
}
}
String thisRowQualifierName = thisRowFamilyName + separator
+ Bytes.toStringBinary(CellUtil.cloneQualifier(value));
if (!thisRowQualifierName.equals(currentQualifierName)) {
currentQualifierName = thisRowQualifierName;
context.getCounter("CFQL", thisRowQualifierName).increment(1);
context.write(new Text("Total Qualifiers across all Rows"),
new IntWritable(1));
context.write(new Text(thisRowQualifierName), new IntWritable(1));
// Initialize versions
context.getCounter("QL_VERSIONS", currentRowKey + separator +
thisRowQualifierName).increment(1);
context.write(new Text(currentRowKey + separator
+ thisRowQualifierName + "_Versions"), new IntWritable(1));
} else {
// Increment versions
currentQualifierName = thisRowQualifierName;
context.getCounter("QL_VERSIONS", currentRowKey + separator +
thisRowQualifierName).increment(1);
context.write(new Text(currentRowKey + separator
+ thisRowQualifierName + "_Versions"), new IntWritable(1));
}
}
} catch (InterruptedException e) {
e.printStackTrace();
}
}
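For context, a TableMapper like the one above is usually wired into a job with TableMapReduceUtil. A minimal sketch, assuming the mapper class is named CellCounterMapper and "mytable" stands in for the real input table (both names are hypothetical):

Configuration conf = HBaseConfiguration.create();
Job job = Job.getInstance(conf, "cell-count");
Scan scan = new Scan();
scan.setMaxVersions(Integer.MAX_VALUE); // visit every version so the version counters are accurate
TableMapReduceUtil.initTableMapperJob(
    "mytable",               // input table name (placeholder)
    scan,
    CellCounterMapper.class, // the mapper shown above (hypothetical class name)
    Text.class,              // matches the Text keys the mapper writes
    IntWritable.class,       // matches the IntWritable values
    job);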