

Java FirstKeyOnlyFilter Class Code Examples

This article collects typical usages of the Java class org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter. If you have been wondering what exactly FirstKeyOnlyFilter does, how to use it, or what real-world examples look like, the curated class code examples below may help.


The FirstKeyOnlyFilter class belongs to the org.apache.hadoop.hbase.filter package. Fifteen code examples of the class are presented below, sorted by popularity by default.
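
Before the collected examples, here is a minimal self-contained sketch of the most common use of the filter: counting rows cheaply by fetching only the first KeyValue of each row. The table name "my_table" and the default configuration are placeholders rather than values taken from any example below.

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;

public class FirstKeyOnlyRowCount {
  public static void main(String[] args) throws IOException {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = connection.getTable(TableName.valueOf("my_table"))) { // "my_table" is a placeholder
      Scan scan = new Scan();
      // Return only the first KeyValue of each row: rows are still enumerated,
      // but almost no data is transferred, which makes counting cheap.
      scan.setFilter(new FirstKeyOnlyFilter());
      long rows = 0;
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result ignored : scanner) {
          rows++;
        }
      }
      System.out.println("row count: " + rows);
    }
  }
}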

Example 1: testFirstKeyFilter

import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; // import the required package/class
@Test
public void testFirstKeyFilter() throws IOException {
  // Initialize
  int numCols = 5;
  String columnValue = "includeThisValue";
  Table table = getConnection().getTable(TABLE_NAME);
  byte[] rowKey = dataHelper.randomData("testRow-");
  Put put = new Put(rowKey);
  for (int i = 0; i < numCols; ++i) {
    put.addColumn(COLUMN_FAMILY, dataHelper.randomData(""), Bytes.toBytes(columnValue));
  }
  table.put(put);

  // Filter for results
  Filter filter = new FirstKeyOnlyFilter();

  Get get = new Get(rowKey).setFilter(filter);
  Result result = table.get(get);
  Assert.assertEquals("Should only return 1 keyvalue", 1, result.size());

  table.close();
}
 
Developer ID: dmmcerlean, Project: cloud-bigtable-client, Lines of code: 23, Source: TestFilters.java

Example 2: getKeys

import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; // import the required package/class
/**
 * Gets the whole set of keys in a table
 * @return The list of keys (byte[]) as objects
 */
public List<Object> getKeys() {
  List<Object> keys = new ArrayList<Object>();
  Scan scan = new Scan();
  scan.setFilter(new FirstKeyOnlyFilter());
  // try-with-resources closes the scanner, which the original version leaked
  try (ResultScanner scanner = table.getScanner(scan)) {
    for (Result rr : scanner) {
      keys.add(rr.getRow());
    }
  } catch (IOException e) {
    e.printStackTrace();
  }
  return keys;
}
 
Developer ID: QualiMaster, Project: Infrastructure, Lines of code: 21, Source: HBaseStorageSupport.java

Example 3: isReallyEmptyRegion

import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; // import the required package/class
public static boolean isReallyEmptyRegion(HConnection connection,
    String tableName, HRegionInfo regionInfo) throws IOException {
    boolean emptyRegion = false;
    // verify really empty region by scanning records
    try (HTableInterface table = connection.getTable(tableName)) {
        Scan scan = new Scan(regionInfo.getStartKey(), regionInfo.getEndKey());
        FilterList filterList = new FilterList();
        filterList.addFilter(new KeyOnlyFilter());
        filterList.addFilter(new FirstKeyOnlyFilter());
        scan.setFilter(filterList);
        scan.setCacheBlocks(false);
        scan.setSmall(true);
        scan.setCaching(1);

        try (ResultScanner scanner = table.getScanner(scan)) {
            if (scanner.next() == null) emptyRegion = true;
        }
    }
    return emptyRegion;
}
 
Developer ID: kakao, Project: hbase-tools, Lines of code: 21, Source: CommandAdapter.java
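
Pairing KeyOnlyFilter with FirstKeyOnlyFilter keeps this probe as cheap as possible: FirstKeyOnlyFilter limits each row to its first cell and KeyOnlyFilter strips that cell's value, so the single scanner.next() call transfers at most one value-less cell. setSmall(true) and setCaching(1) further cut the RPC cost of this one-result scan.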

Example 4: run

import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; // import the required package/class
@Override
public void run() {
    try (HTableInterface table = connection.getTable(tableName.getBytes())) {
        // Use Scan instead of Get so this probe does not increase the
        // table's read request count metric.
        Scan scan = new Scan("".getBytes(), "".getBytes());
        FilterList filterList = new FilterList();
        filterList.addFilter(new KeyOnlyFilter());
        filterList.addFilter(new FirstKeyOnlyFilter());
        scan.setFilter(filterList);
        //noinspection EmptyTryBlock
        try(ResultScanner ignored = table.getScanner(scan)) {
        }
        return;
    } catch (IOException ignore) {
    }

    clean(tableName);
}
 
Developer ID: kakao, Project: hbase-tools, Lines of code: 20, Source: RegionLocationCleaner.java

Example 5: isSuccessfulScan

import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; // import the required package/class
/**
 * Tries to scan a row from the passed region
 * @param admin the Admin whose connection is used to reach the region's table
 * @param region the region to probe
 * @throws IOException if the scan fails
 */
private void isSuccessfulScan(Admin admin, RegionInfo region) throws IOException {
  Scan scan = new Scan(region.getStartKey());
  scan.setBatch(1);
  scan.setCaching(1);
  scan.setFilter(new FirstKeyOnlyFilter());
  try {
    Table table = admin.getConnection().getTable(region.getTable());
    try {
      ResultScanner scanner = table.getScanner(scan);
      try {
        scanner.next();
      } finally {
        scanner.close();
      }
    } finally {
      table.close();
    }
  } catch (IOException e) {
    LOG.error("Could not scan region:" + region.getEncodedName(), e);
    throw e;
  }
}
 
Developer ID: apache, Project: hbase, Lines of code: 29, Source: RegionMover.java
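
On Java 7 and later, the nested try/finally blocks above can be flattened with try-with-resources. A sketch of the same logic, assuming the same LOG field is in scope:

private void isSuccessfulScan(Admin admin, RegionInfo region) throws IOException {
  Scan scan = new Scan(region.getStartKey());
  scan.setBatch(1);
  scan.setCaching(1);
  scan.setFilter(new FirstKeyOnlyFilter());
  // Both the table and the scanner are closed automatically, in reverse order.
  try (Table table = admin.getConnection().getTable(region.getTable());
       ResultScanner scanner = table.getScanner(scan)) {
    scanner.next();
  } catch (IOException e) {
    LOG.error("Could not scan region:" + region.getEncodedName(), e);
    throw e;
  }
}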

Example 6: count

import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; // import the required package/class
public static void count() {
  long begin = System.currentTimeMillis();
  AggregationClient ac = new AggregationClient(conf);
  Scan scan = new Scan();
//  scan.setStartRow(Bytes.toBytes("3"));
//  scan.addColumn(Bytes.toBytes("fal"), Bytes.toBytes("val"));
  scan.addFamily(Bytes.toBytes("fal"));
  scan.setFilter(new FirstKeyOnlyFilter());
  long rowCount = 0;
  try {
    rowCount = ac.rowCount(Bytes.toBytes("test3"), new LongColumnInterpreter(), scan);
//    rowCount = ac.max(Bytes.toBytes("test"), new LongColumnInterpreter(), scan);
    System.out.println(rowCount);
  } catch (Throwable e) {
    e.printStackTrace();
  }
  long end = System.currentTimeMillis();
  System.out.println(end - begin);
}
 
Developer ID: Justice-love, Project: oceandata, Lines of code: 20, Source: CoprocessorTest.java
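
Note that AggregationClient.rowCount only succeeds if the AggregateImplementation coprocessor is loaded for the target table on the server side (either per table or cluster-wide via hbase.coprocessor.region.classes). A sketch of enabling it per table with the HBase 1.x admin API; the table name "test3" is taken from the example above:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

static void enableAggregation(Configuration conf) throws IOException {
  String coproc = "org.apache.hadoop.hbase.coprocessor.AggregateImplementation";
  TableName tableName = TableName.valueOf("test3"); // table used in the example above
  try (Connection connection = ConnectionFactory.createConnection(conf);
       Admin admin = connection.getAdmin()) {
    HTableDescriptor desc = admin.getTableDescriptor(tableName);
    if (!desc.hasCoprocessor(coproc)) {
      // Attach the stock aggregation endpoint and apply the changed descriptor.
      desc.addCoprocessor(coproc);
      admin.disableTable(tableName);
      admin.modifyTable(tableName, desc);
      admin.enableTable(tableName);
    }
  }
}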

Example 7: getAllDataRowKeys

import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; // import the required package/class
private LoopRowKeysStrategy getAllDataRowKeys() throws IOException {
  LoopRowKeysStrategy strategy = null;
  HTable table = null;
  ResultScanner rs = null;
  try {
    table = new HTable(this.getConf(), this.vertexTableName);
  
    Scan scan = new Scan();
    scan.setFilter(new FirstKeyOnlyFilter());
    rs = table.getScanner(scan);
    strategy = new GetAllRowKeysStrategy(table, rs);
  } catch (IOException e) {
    LOG.error("getSampleDataRowKey failed", e);
    throw e;
  }
  return strategy;
}
 
Developer ID: trendmicro, Project: HGraph, Lines of code: 18, Source: GetGeneratedGraphData.java

Example 8: transformColumns

import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; // import the required package/class
@Override
protected Collection<SchemaPath> transformColumns(Collection<SchemaPath> columns) {
  Set<SchemaPath> transformed = Sets.newLinkedHashSet();
  rowKeyOnly = true;
  if (!isStarQuery()) {
    for (SchemaPath column : columns) {
      if (column.getRootSegment().getPath().equalsIgnoreCase(ROW_KEY)) {
        transformed.add(ROW_KEY_PATH);
        continue;
      }
      rowKeyOnly = false;
      NameSegment root = column.getRootSegment();
      byte[] family = root.getPath().getBytes();
      transformed.add(SchemaPath.getSimplePath(root.getPath()));
      PathSegment child = root.getChild();
      if (child != null && child.isNamed()) {
        byte[] qualifier = child.getNameSegment().getPath().getBytes();
        hbaseScan.addColumn(family, qualifier);
      } else {
        hbaseScan.addFamily(family);
      }
    }
    /* if only the row key was requested, add a FirstKeyOnlyFilter to the scan
     * to fetch only one KV from each row. If a filter is already part of this
     * scan, add the FirstKeyOnlyFilter as the LAST filter of a MUST_PASS_ALL
     * FilterList.
     */
    if (rowKeyOnly) {
      hbaseScan.setFilter(
          HBaseUtils.andFilterAtIndex(hbaseScan.getFilter(), HBaseUtils.LAST_FILTER, new FirstKeyOnlyFilter()));
    }
  } else {
    rowKeyOnly = false;
    transformed.add(ROW_KEY_PATH);
  }

  return transformed;
}
 
Developer ID: skhalifa, Project: QDrill, Lines of code: 40, Source: HBaseRecordReader.java
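
HBaseUtils.andFilterAtIndex is a Drill-specific helper whose source is not shown here. A minimal sketch of the effect the comment describes, using only the standard HBase FilterList API: combine any pre-existing filter with a FirstKeyOnlyFilter under MUST_PASS_ALL, keeping FirstKeyOnlyFilter last so the user's filter is evaluated first:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;

static void addFirstKeyOnlyLast(Scan scan) {
  Filter existing = scan.getFilter();
  if (existing == null) {
    scan.setFilter(new FirstKeyOnlyFilter());
    return;
  }
  // MUST_PASS_ALL: a cell survives only if every filter in the list accepts it.
  // FirstKeyOnlyFilter goes last so the existing filter is consulted first.
  FilterList combined = new FilterList(FilterList.Operator.MUST_PASS_ALL);
  combined.addFilter(existing);
  combined.addFilter(new FirstKeyOnlyFilter());
  scan.setFilter(combined);
}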

Example 9: testFilterAllRecords

import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; // import the required package/class
@Test
public void testFilterAllRecords() throws IOException {
  Scan scan = new Scan();
  scan.setBatch(1);
  scan.setCaching(1);
  // Filter out all records
  scan.setFilter(new FilterList(new FirstKeyOnlyFilter(), new InclusiveStopFilter(new byte[0])));
  Table table = TEST_UTIL.getConnection().getTable(TableName.NAMESPACE_TABLE_NAME);
  ResultScanner s = table.getScanner(scan);
  assertNull(s.next());
  table.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 13, Source: TestFromClientSide.java

Example 10: testReverseScanWithoutPadding

import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; // import the required package/class
@Test
public void testReverseScanWithoutPadding() throws Exception {
  byte[] row1 = Bytes.toBytes("a");
  byte[] row2 = Bytes.toBytes("ab");
  byte[] row3 = Bytes.toBytes("b");

  Put put1 = new Put(row1);
  put1.addColumn(cfName, cqName, HConstants.EMPTY_BYTE_ARRAY);
  Put put2 = new Put(row2);
  put2.addColumn(cfName, cqName, HConstants.EMPTY_BYTE_ARRAY);
  Put put3 = new Put(row3);
  put3.addColumn(cfName, cqName, HConstants.EMPTY_BYTE_ARRAY);

  region.put(put1);
  region.put(put2);
  region.put(put3);
  region.flush(true);
  Scan scan = new Scan();
  scan.setCacheBlocks(false);
  scan.setReversed(true);
  scan.setFilter(new FirstKeyOnlyFilter());
  scan.addFamily(cfName);
  RegionScanner scanner = region.getScanner(scan);
  List<Cell> res = new ArrayList<Cell>();
  int count = 1;
  while (scanner.next(res)) {
    count++;
  }
  assertEquals(Bytes.toString(res.get(0).getRowArray(), res.get(0).getRowOffset(), res.get(0)
      .getRowLength()), "b");
  assertEquals(Bytes.toString(res.get(1).getRowArray(), res.get(1).getRowOffset(), res.get(1)
      .getRowLength()), "ab");
  assertEquals(Bytes.toString(res.get(2).getRowArray(), res.get(2).getRowOffset(), res.get(2)
      .getRowLength()), "a");
  assertEquals(3, count);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 37, Source: TestSeekBeforeWithReverseScan.java
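
Note that count starts at 1 because the final RegionScanner.next(List) call still appends the last row's cell to res but returns false, ending the loop without a final increment; the assertions then confirm the reversed row order b, ab, a.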

Example 11: testReverseScanWithPadding

import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; // import the required package/class
@Test
public void testReverseScanWithPadding() throws Exception {
  byte[] terminator = new byte[] { -1 };
  byte[] row1 = Bytes.add(invert(Bytes.toBytes("a")), terminator);
  byte[] row2 = Bytes.add(invert(Bytes.toBytes("ab")), terminator);
  byte[] row3 = Bytes.add(invert(Bytes.toBytes("b")), terminator);

  Put put1 = new Put(row1);
  put1.addColumn(cfName, cqName, HConstants.EMPTY_BYTE_ARRAY);
  Put put2 = new Put(row2);
  put2.addColumn(cfName, cqName, HConstants.EMPTY_BYTE_ARRAY);
  Put put3 = new Put(row3);
  put3.addColumn(cfName, cqName, HConstants.EMPTY_BYTE_ARRAY);

  region.put(put1);
  region.put(put2);
  region.put(put3);
  region.flush(true);
  Scan scan = new Scan();
  scan.setCacheBlocks(false);
  scan.setReversed(true);
  scan.setFilter(new FirstKeyOnlyFilter());
  scan.addFamily(cfName);
  RegionScanner scanner = region.getScanner(scan);
  List<Cell> res = new ArrayList<Cell>();
  int count = 1;
  while (scanner.next(res)) {
    count++;
  }
  assertEquals(3, count);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 32, Source: TestSeekBeforeWithReverseScan.java

Example 12: testPartialResultsWithColumnFilter

import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; // import the required package/class
/**
 * Test partial Result re-assembly in the presence of different filters. The Results from the
 * partial scanner should match the Results returned from a scanner that receives all of the
 * results in one RPC to the server. The partial scanner is tested with a variety of different
 * result sizes (all of which are less than the size necessary to fetch an entire row)
 * @throws Exception
 */
@Test
public void testPartialResultsWithColumnFilter() throws Exception {
  testPartialResultsWithColumnFilter(new FirstKeyOnlyFilter());
  testPartialResultsWithColumnFilter(new ColumnPrefixFilter(Bytes.toBytes("testQualifier5")));
  testPartialResultsWithColumnFilter(new ColumnRangeFilter(Bytes.toBytes("testQualifer1"), true,
      Bytes.toBytes("testQualifier7"), true));

  Set<byte[]> qualifiers = new LinkedHashSet<>();
  qualifiers.add(Bytes.toBytes("testQualifier5"));
  testPartialResultsWithColumnFilter(new FirstKeyValueMatchingQualifiersFilter(qualifiers));
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 19, Source: TestPartialResultsFromClientSide.java

Example 13: testFirstKeyOnlyFilter

import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; // import the required package/class
@Test
public void testFirstKeyOnlyFilter() throws Exception {
  Scan s = new Scan();
  s.setFilter(new FirstKeyOnlyFilter());
  // Expected KVs, the first KV from each of the remaining 6 rows
  KeyValue [] kvs = {
      new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
      new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
      new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
      new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
      new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
      new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1])
  };
  verifyScanFull(s, kvs);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 16, Source: TestScannersWithFilters.java

Example 14: getRowCount

import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; // import the required package/class
/**
 * Returns a count of the rows in the region where this coprocessor is loaded.
 */
@Override
public void getRowCount(RpcController controller, ExampleProtos.CountRequest request,
                        RpcCallback<ExampleProtos.CountResponse> done) {
  Scan scan = new Scan();
  scan.setFilter(new FirstKeyOnlyFilter());
  ExampleProtos.CountResponse response = null;
  InternalScanner scanner = null;
  try {
    scanner = env.getRegion().getScanner(scan);
    List<Cell> results = new ArrayList<Cell>();
    boolean hasMore = false;
    byte[] lastRow = null;
    long count = 0;
    do {
      hasMore = scanner.next(results);
      for (Cell kv : results) {
        byte[] currentRow = CellUtil.cloneRow(kv);
        if (lastRow == null || !Bytes.equals(lastRow, currentRow)) {
          lastRow = currentRow;
          count++;
        }
      }
      results.clear();
    } while (hasMore);

    response = ExampleProtos.CountResponse.newBuilder()
        .setCount(count).build();
  } catch (IOException ioe) {
    ResponseConverter.setControllerException(controller, ioe);
  } finally {
    if (scanner != null) {
      try {
        scanner.close();
      } catch (IOException ignored) {}
    }
  }
  done.run(response);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 42, Source: RowCountEndpoint.java
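
Invoking this endpoint from a client and summing the per-region counts can look like the following sketch, based on the HBase 1.x coprocessor client API. It assumes the RowCountEndpoint coprocessor has been deployed on the table's regions; table is an open Table handle:

import java.io.IOException;
import java.util.Map;

import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos;
import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
import org.apache.hadoop.hbase.ipc.ServerRpcController;

static long countRows(Table table) throws Throwable {
  // One RPC per region; each region returns its local row count.
  Map<byte[], Long> perRegion = table.coprocessorService(
      ExampleProtos.RowCountService.class, null, null,
      new Batch.Call<ExampleProtos.RowCountService, Long>() {
        @Override
        public Long call(ExampleProtos.RowCountService counter) throws IOException {
          ServerRpcController controller = new ServerRpcController();
          BlockingRpcCallback<ExampleProtos.CountResponse> callback =
              new BlockingRpcCallback<ExampleProtos.CountResponse>();
          counter.getRowCount(controller,
              ExampleProtos.CountRequest.getDefaultInstance(), callback);
          ExampleProtos.CountResponse response = callback.get();
          if (controller.failedOnException()) {
            throw controller.getFailedOn();
          }
          return response.getCount();
        }
      });
  long total = 0;
  for (Long regionCount : perRegion.values()) {
    total += regionCount;
  }
  return total;
}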

Example 15: executionData

import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; // import the required package/class
List<WorkflowInstanceExecutionData> executionData(WorkflowId workflowId, String offset, int limit)
    throws IOException {
  try (final Table eventsTable = connection.getTable(EVENTS_TABLE_NAME)) {
    final Scan scan = new Scan()
        .setRowPrefixFilter(Bytes.toBytes(workflowId.toKey() + '#'))
        .setFilter(new FirstKeyOnlyFilter());

    if (!Strings.isNullOrEmpty(offset)) {
      final WorkflowInstance offsetInstance = WorkflowInstance.create(workflowId, offset);
      scan.setStartRow(Bytes.toBytes(offsetInstance.toKey() + '#'));
    }

    final Set<WorkflowInstance> workflowInstancesSet = Sets.newHashSet();
    try (ResultScanner scanner = eventsTable.getScanner(scan)) {
      Result result = scanner.next();
      while (result != null) {
        final String key = new String(result.getRow());
        final int lastHash = key.lastIndexOf('#');

        final WorkflowInstance wfi = WorkflowInstance.parseKey(key.substring(0, lastHash));
        workflowInstancesSet.add(wfi);
        if (workflowInstancesSet.size() == limit) {
          break;
        }

        result = scanner.next();
      }
    }

    return executionData(workflowInstancesSet);
  }
}
 
Developer ID: spotify, Project: styx, Lines of code: 33, Source: BigtableStorage.java
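
A subtlety worth noting: setRowPrefixFilter sets both the scan's start and stop rows from the prefix, and the later setStartRow call overrides only the start row. The scan therefore resumes from the offset instance while still stopping at the end of the workflow's key range.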


Note: The org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter class examples in this article were compiled from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright in the source code remains with the original authors. Consult the corresponding project's license before distributing or using the code; do not reproduce this article without permission.