

Java Scan.addColumn Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.Scan.addColumn. If you are wondering what exactly Scan.addColumn does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.client.Scan.


The sections below present 15 code examples of the Scan.addColumn method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
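
To ground the examples that follow, here is a minimal, self-contained sketch of the typical Scan.addColumn pattern: restrict a scan to a single family/qualifier pair and iterate over the matching rows. The table name "t1", column family "cf", and qualifier "q" are placeholder assumptions for illustration only; the connection setup uses the standard HBase client API (ConnectionFactory/Table) that the examples below rely on.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanAddColumnSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Placeholder names assumed for illustration: table "t1", family "cf", qualifier "q".
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("t1"))) {
      Scan scan = new Scan();
      // addColumn narrows the scan to one column (family + qualifier);
      // compare with addFamily, which returns every column in the family.
      scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result result : scanner) {
          byte[] value = result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"));
          System.out.println(Bytes.toString(result.getRow()) + " => "
              + (value == null ? "<no value>" : Bytes.toString(value)));
        }
      }
    }
  }
}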

Example 1: testMaxWithInvalidRange

import org.apache.hadoop.hbase.client.Scan; // import the package/class that the method depends on
@Test(timeout = 300000)
public void testMaxWithInvalidRange() {
  AggregationClient aClient = new AggregationClient(conf);
  final ColumnInterpreter<Double, Double, EmptyMsg, DoubleMsg, DoubleMsg> ci = 
      new DoubleColumnInterpreter();
  Scan scan = new Scan();
  scan.setStartRow(ROWS[4]);
  scan.setStopRow(ROWS[2]);
  scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  double max = Double.MIN_VALUE;
  try {
    max = aClient.max(TEST_TABLE, ci, scan);
  } catch (Throwable e) {
    max = 0.00;
  }
  assertEquals(0.00, max, 0.00); // control should go to the catch block
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 19, Source: TestDoubleColumnInterpreter.java

Example 2: testRow

import org.apache.hadoop.hbase.client.Scan; // import the package/class that the method depends on
@Override
void testRow(final int i) throws IOException {
  Scan scan = new Scan(getRandomRow(this.rand, opts.totalRows));
  scan.setCaching(opts.caching);
  FilterList list = new FilterList();
  if (opts.addColumns) {
    scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  } else {
    scan.addFamily(FAMILY_NAME);
  }
  if (opts.filterAll) {
    list.addFilter(new FilterAllFilter());
  }
  list.addFilter(new WhileMatchFilter(new PageFilter(120)));
  scan.setFilter(list);
  ResultScanner s = this.table.getScanner(scan);
  for (Result rr; (rr = s.next()) != null;) {
    updateValueSize(rr);
  }
  s.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 22, Source: PerformanceEvaluation.java

Example 3: testMaxWithInvalidRange

import org.apache.hadoop.hbase.client.Scan; // import the package/class that the method depends on
@Test (timeout=300000)
public void testMaxWithInvalidRange() {
  AggregationClient aClient = new AggregationClient(conf);
  final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
    new BigDecimalColumnInterpreter();
  Scan scan = new Scan();
  scan.setStartRow(ROWS[4]);
  scan.setStopRow(ROWS[2]);
  scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  BigDecimal max = new BigDecimal(Long.MIN_VALUE);
  try {
    max = aClient.max(TEST_TABLE, ci, scan);
  } catch (Throwable e) {
    max = BigDecimal.ZERO;
  }
  assertEquals(BigDecimal.ZERO, max); // control should go to the catch block
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 19, Source: TestBigDecimalColumnInterpreter.java

Example 4: constructScan

import org.apache.hadoop.hbase.client.Scan; // import the package/class that the method depends on
protected Scan constructScan(byte[] valuePrefix) throws IOException {
  FilterList list = new FilterList();
  Filter filter = new SingleColumnValueFilter(
      FAMILY_NAME, COLUMN_ZERO, CompareFilter.CompareOp.EQUAL,
      new BinaryComparator(valuePrefix)
  );
  list.addFilter(filter);
  if (opts.filterAll) {
    list.addFilter(new FilterAllFilter());
  }
  Scan scan = new Scan();
  scan.setCaching(opts.caching);
  if (opts.addColumns) {
    scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  } else {
    scan.addFamily(FAMILY_NAME);
  }
  scan.setFilter(list);
  return scan;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 21, Source: PerformanceEvaluation.java

Example 5: testRawScanWithColumns

import org.apache.hadoop.hbase.client.Scan; // import the package/class that the method depends on
/**
 * The ExplicitColumnTracker does not support "raw" scanning.
 */
@Test
public void testRawScanWithColumns() throws Exception {
  HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 3,
      HConstants.FOREVER, KeepDeletedCells.TRUE);
  HRegion region = hbu.createLocalHRegion(htd, null, null);

  Scan s = new Scan();
  s.setRaw(true);
  s.setMaxVersions();
  s.addColumn(c0, c0);

  try {
    region.getScanner(s);
    fail("raw scanner with columns should have failed");
  } catch (org.apache.hadoop.hbase.DoNotRetryIOException dnre) {
    // ok!
  }

  HRegion.closeHRegion(region);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 24, Source: TestKeepDeletes.java

Example 6: testRow

import org.apache.hadoop.hbase.client.Scan; // import the package/class that the method depends on
@Override
void testRow(final int i) throws IOException {
  Pair<byte[], byte[]> startAndStopRow = getStartAndStopRow();
  Scan scan = new Scan(startAndStopRow.getFirst(), startAndStopRow.getSecond());
  scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  ResultScanner s = this.table.getScanner(scan);
  int count = 0;
  for (Result rr = null; (rr = s.next()) != null;) {
    count++;
  }

  if (i % 100 == 0) {
    LOG.info(String.format("Scan for key range %s - %s returned %s rows",
        Bytes.toString(startAndStopRow.getFirst()),
        Bytes.toString(startAndStopRow.getSecond()), count));
  }

  s.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 20, Source: PerformanceEvaluation.java

Example 7: testMaxWithInvalidRange2

import org.apache.hadoop.hbase.client.Scan; // import the package/class that the method depends on
@Test(timeout = 300000)
public void testMaxWithInvalidRange2() throws Throwable {
  double max = Double.MIN_VALUE;
  Scan scan = new Scan();
  scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  scan.setStartRow(ROWS[4]);
  scan.setStopRow(ROWS[4]);
  try {
    AggregationClient aClient = new AggregationClient(conf);
    final ColumnInterpreter<Double, Double, EmptyMsg, DoubleMsg, DoubleMsg> ci = 
        new DoubleColumnInterpreter();
    max = aClient.max(TEST_TABLE, ci, scan);
  } catch (Exception e) {
    max = 0.00;
  }
  assertEquals(0.00, max, 0.00); // control should go to the catch block
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 18, Source: TestDoubleColumnInterpreter.java

Example 8: testStdWithValidRange

import org.apache.hadoop.hbase.client.Scan; // import the package/class that the method depends on
/**
 * @throws Throwable
 */
@Test(timeout = 300000)
public void testStdWithValidRange() throws Throwable {
  AggregationClient aClient = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  final ColumnInterpreter<Double, Double, EmptyMsg, DoubleMsg, DoubleMsg> ci = 
      new DoubleColumnInterpreter();
  double std = aClient.std(TEST_TABLE, ci, scan);
  assertEquals(5.766, std, 0.05d);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 14, Source: TestDoubleColumnInterpreter.java

Example 9: find

import org.apache.hadoop.hbase.client.Scan; // import the package/class that the method depends on
@Override
public <T> T find(TableName tableName, String family, String qualifier, final
ResultsExtractor<T> action) {
    Scan scan = new Scan();
    scan.addColumn(family.getBytes(getCharset()), qualifier.getBytes(getCharset()));
    return find(tableName, scan, action);
}
 
Developer ID: fchenxi, Project: easyhbase, Lines: 8, Source: HbaseTemplate2.java

Example 10: testMedianWithValidRange

import org.apache.hadoop.hbase.client.Scan; // import the package/class that the method depends on
/**
 * @throws Throwable
 */
@Test (timeout=300000)
public void testMedianWithValidRange() throws Throwable {
  AggregationClient aClient = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  final ColumnInterpreter<Long, Long, EmptyMsg, LongMsg, LongMsg> ci =
      new LongColumnInterpreter();
  long median = aClient.median(TEST_TABLE, ci, scan);
  assertEquals(8L, median);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 15, Source: TestAggregateProtocol.java

Example 11: testAvgWithFilter

import org.apache.hadoop.hbase.client.Scan; // import the package/class that the method depends on
@Test(timeout = 300000)
public void testAvgWithFilter() throws Throwable {
  AggregationClient aClient = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  Filter f = new PrefixFilter(Bytes.toBytes("foo:bar"));
  scan.setFilter(f);
  final ColumnInterpreter<Double, Double, EmptyMsg, DoubleMsg, DoubleMsg> ci = 
      new DoubleColumnInterpreter();
  Double avg = aClient.avg(TEST_TABLE, ci, scan);
  assertEquals(Double.NaN, avg, 0);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 14, Source: TestDoubleColumnInterpreter.java

Example 12: testSumWithValidRange

import org.apache.hadoop.hbase.client.Scan; // import the package/class that the method depends on
/**
 * @throws Throwable
 */
@Test (timeout=300000)
public void testSumWithValidRange() throws Throwable {
  AggregationClient aClient = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  final ColumnInterpreter<Long, Long, EmptyMsg, LongMsg, LongMsg> ci =
      new LongColumnInterpreter();
  long sum = aClient.sum(TEST_TABLE, ci, scan);
  assertEquals(190, sum);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 15, Source: TestAggregateProtocol.java

Example 13: testMaxWithValidRange

import org.apache.hadoop.hbase.client.Scan; // import the package/class that the method depends on
/**
 * Gives the max for the entire table.
 * @throws Throwable
 */
@Test (timeout=300000)
public void testMaxWithValidRange() throws Throwable {
  AggregationClient aClient = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  final ColumnInterpreter<Long, Long, EmptyMsg, LongMsg, LongMsg> ci =
      new LongColumnInterpreter();
  long maximum = aClient.max(TEST_TABLE, ci, scan);
  assertEquals(19, maximum);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 15, Source: TestAggregateProtocol.java

Example 14: scannerOpen

import org.apache.hadoop.hbase.client.Scan; // import the package/class that the method depends on
@Override
public int scannerOpen(ByteBuffer tableName, ByteBuffer startRow,
    List<ByteBuffer> columns,
    Map<ByteBuffer, ByteBuffer> attributes) throws IOError {

  Table table = null;
  try {
    table = getTable(tableName);
    Scan scan = new Scan(getBytes(startRow));
    addAttributes(scan, attributes);
    if(columns != null && columns.size() != 0) {
      for(ByteBuffer column : columns) {
        byte [][] famQf = KeyValue.parseColumn(getBytes(column));
        if(famQf.length == 1) {
          scan.addFamily(famQf[0]);
        } else {
          scan.addColumn(famQf[0], famQf[1]);
        }
      }
    }
    return addScanner(table.getScanner(scan), false);
  } catch (IOException e) {
    LOG.warn(e.getMessage(), e);
    throw new IOError(Throwables.getStackTraceAsString(e));
  } finally{
    closeTable(table);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 29, Source: ThriftServerRunner.java

Example 15: scannerOpenWithStop

import org.apache.hadoop.hbase.client.Scan; // import the package/class that the method depends on
@Override
public int scannerOpenWithStop(ByteBuffer tableName, ByteBuffer startRow,
    ByteBuffer stopRow, List<ByteBuffer> columns,
    Map<ByteBuffer, ByteBuffer> attributes)
    throws IOError, TException {

  Table table = null;
  try {
    table = getTable(tableName);
    Scan scan = new Scan(getBytes(startRow), getBytes(stopRow));
    addAttributes(scan, attributes);
    if(columns != null && columns.size() != 0) {
      for(ByteBuffer column : columns) {
        byte [][] famQf = KeyValue.parseColumn(getBytes(column));
        if(famQf.length == 1) {
          scan.addFamily(famQf[0]);
        } else {
          scan.addColumn(famQf[0], famQf[1]);
        }
      }
    }
    return addScanner(table.getScanner(scan), false);
  } catch (IOException e) {
    LOG.warn(e.getMessage(), e);
    throw new IOError(Throwables.getStackTraceAsString(e));
  } finally{
    closeTable(table);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 30, Source: ThriftServerRunner.java


Note: The org.apache.hadoop.hbase.client.Scan.addColumn method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the corresponding project's License. Do not reproduce without permission.