

Java IndexSpecification.addIndexColumn Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.index.IndexSpecification.addIndexColumn. If you are wondering what IndexSpecification.addIndexColumn does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage examples of the containing class, org.apache.hadoop.hbase.index.IndexSpecification.


The following presents 15 code examples of IndexSpecification.addIndexColumn, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
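Before the individual examples, here is a minimal sketch of the typical call pattern, assembled from the examples below. The class and constant names (IndexSpecification, TableIndices, ValueType, Constants.INDEX_SPEC_KEY) come from the HIndex project itself; the import paths for the HIndex-specific classes are assumed to sit in the same org.apache.hadoop.hbase.index package as IndexSpecification, and the table, family, and qualifier names are placeholders. Each addIndexColumn call registers one indexed column (family descriptor, qualifier, value type, maximum value length), and the assembled indices are serialized into the table descriptor before the table is created:

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.index.Constants;
import org.apache.hadoop.hbase.index.IndexSpecification;
import org.apache.hadoop.hbase.index.TableIndices;
import org.apache.hadoop.hbase.index.ValueType;

public class AddIndexColumnSketch {
  // Builds a table descriptor carrying one secondary index over cf:q1 and cf:q2.
  public static HTableDescriptor buildIndexedDescriptor() {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("myTable"));
    HColumnDescriptor hcd = new HColumnDescriptor("cf");
    htd.addFamily(hcd);

    // One index over two qualifiers of the same family; each value is
    // indexed as a String occupying at most 10 bytes of the index row key.
    IndexSpecification spec = new IndexSpecification("idx1");
    spec.addIndexColumn(hcd, "q1", ValueType.String, 10);
    spec.addIndexColumn(hcd, "q2", ValueType.String, 10);

    // Serialize the index metadata into the table descriptor; HIndex reads
    // it back from the Constants.INDEX_SPEC_KEY attribute at table creation.
    TableIndices indices = new TableIndices();
    indices.addIndex(spec);
    htd.setValue(Constants.INDEX_SPEC_KEY, indices.toByteArray());
    return htd;
  }
}

The tests below exercise this same pattern against IndexManager, IndexUtils.prepareIndexPut, and live tables.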

Example 1: testShouldNotThrowNPEIfValueTypeIsNull

import org.apache.hadoop.hbase.index.IndexSpecification; // import the package/class the method depends on
public void testShouldNotThrowNPEIfValueTypeIsNull() throws Exception {
  IndexManager im = IndexManager.getInstance();
  assertNotNull("Index Manager should not be null.", im);

  List<IndexSpecification> indexList = new ArrayList<IndexSpecification>(1);
  IndexSpecification iSpec = new IndexSpecification("index_name");

  iSpec.addIndexColumn(new HColumnDescriptor("cf"), "cq", null, 5);
  indexList.add(iSpec);
  im.addIndexForTable("index_name", indexList);
  indexList = im.getIndicesForTable("index_name");

  Set<ColumnQualifier> indexColumns = indexList.get(0).getIndexColumns();
  for (ColumnQualifier columnQualifier : indexColumns) {
    assertNotNull(columnQualifier.getType());
  }
}
 
Developer: tenggyut, Project: HIndex, Lines: 18, Source: TestIndexManager.java

Example 2: testNoIndexExpression

import org.apache.hadoop.hbase.index.IndexSpecification; // import the package/class the method depends on
@Test
public void testNoIndexExpression() throws Exception {
  IndexExpression exp = new NoIndexExpression();
  Scan scan = new Scan();
  scan.setAttribute(Constants.INDEX_EXPRESSION, IndexUtils.toBytes(exp));
  byte[] value1 = Bytes.toBytes("asdf");
  scan.setFilter(new SingleColumnValueFilter(FAMILY1, QUALIFIER1, CompareOp.EQUAL, value1));
  List<IndexSpecification> indices = new ArrayList<IndexSpecification>();
  IndexSpecification is1 = new IndexSpecification("idx1");
  HColumnDescriptor colDesc = new HColumnDescriptor(FAMILY1);
  is1.addIndexColumn(colDesc, COL1, ValueType.String, 15);
  indices.add(is1);
  ScanFilterEvaluator evaluator = new ScanFilterEvaluator();
  HRegion region =
      initHRegion(tableName.getBytes(), null, null, "testNoIndexExpression",
        TEST_UTIL.getConfiguration(), FAMILY1);
  IndexRegionScanner scanner = evaluator.evaluate(scan, indices, new byte[0], region, tableName);
  assertNull(scanner);
}
 
Developer: tenggyut, Project: HIndex, Lines: 20, Source: TestScanFilterEvaluatorForIndexInScan.java

Example 3: testIndexPutWithOffsetAndLength

import org.apache.hadoop.hbase.index.IndexSpecification; // import the package/class the method depends on
@Test(timeout = 180000)
public void testIndexPutWithOffsetAndLength() throws IOException {
  Path basedir = new Path(DIR + "TestIndexPut");
  Configuration conf = TEST_UTIL.getConfiguration();
  HTableDescriptor htd =
      new HTableDescriptor(TableName.valueOf("testIndexPutWithOffsetAndLength"));
  HRegionInfo info = new HRegionInfo(htd.getTableName(), "ABC".getBytes(), "BBB".getBytes(), false);
  HRegion region = HRegion.createHRegion(info, basedir, conf, htd);
  IndexSpecification spec = new IndexSpecification("index");
  spec.addIndexColumn(new HColumnDescriptor("col"), "ql1", new SpatialPartition(20, 2),
    ValueType.String, 18);

  byte[] value1 = "AB---CD---EF---GH---IJ---KL---MN---OP---".getBytes();
  Put p = new Put("row".getBytes());
  p.add("col".getBytes(), "ql1".getBytes(), value1);
  Put indexPut = IndexUtils.prepareIndexPut(p, spec, region);
  byte[] indexRowKey = indexPut.getRow();
  byte[] actualResult = new byte[2];
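  // Offset 22 = region start key "ABC" (3 bytes) + 1 + the maximum index name
  // length (apparently 18; compare the length arithmetic in Example 4).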
  System.arraycopy(indexRowKey, 22, actualResult, 0, actualResult.length);
  byte[] expectedResult = new byte[2];
  System.arraycopy("IJ".getBytes(), 0, expectedResult, 0, "IJ".getBytes().length);
  Assert.assertTrue(Bytes.equals(actualResult, expectedResult));
}
 
Developer: tenggyut, Project: HIndex, Lines: 24, Source: TestExtendedPutOps.java

Example 4: testIndexPutRowkeyWithAllTheValues

import org.apache.hadoop.hbase.index.IndexSpecification; // import the package/class the method depends on
@Test(timeout = 180000)
public void testIndexPutRowkeyWithAllTheValues() throws IOException {
  String DIR = UTIL.getDataTestDir("TestStore").toString();
  Path basedir = new Path(DIR + "TestIndexPut");
  // Path logdir = new Path(DIR+"TestIndexPut"+"/logs");
  FileSystem fs = UTIL.getTestFileSystem();
  Configuration conf = UTIL.getConfiguration();
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("TestIndexPut"));
  HRegionInfo info = new HRegionInfo(htd.getTableName(), "A".getBytes(), "B".getBytes(), false);
  HLog hlog = UTIL.getMiniHBaseCluster().getRegionServer(0).getWAL();
  HRegion region = new HRegion(basedir, hlog, fs, conf, info, htd, null);
  IndexSpecification spec = new IndexSpecification("testSpec");
  spec.addIndexColumn(new HColumnDescriptor("cf1"), "ql1", ValueType.String, 10);
  spec.addIndexColumn(new HColumnDescriptor("cf2"), "ql1", ValueType.String, 10);

  // Scenario where both the indexed cols are there in the put
  byte[] rowKey = "Arow1".getBytes();
  Put p = new Put(rowKey);
  long time = 1234567;
  p.add("cf1".getBytes(), "ql1".getBytes(), time, "testvalue1".getBytes());
  p.add("cf2".getBytes(), "ql1".getBytes(), time + 10, "testvalue1".getBytes());
  Put indexPut = IndexUtils.prepareIndexPut(p, spec, region);
  Assert.assertEquals(region.getStartKey().length + 1 + Constants.DEF_MAX_INDEX_NAME_LENGTH + 2
      * 10 + rowKey.length, indexPut.getRow().length);
  Assert.assertEquals(time + 10, indexPut.get(Constants.IDX_COL_FAMILY, "".getBytes()).get(0)
      .getTimestamp());

}
 
Developer: tenggyut, Project: HIndex, Lines: 29, Source: TestIndexRegionObserver.java

Example 5: testRemoveIndicesForTable

import org.apache.hadoop.hbase.index.IndexSpecification; // import the package/class the method depends on
public void testRemoveIndicesForTable() throws Exception {
  IndexManager im = IndexManager.getInstance();
  assertNotNull("Index Manager should not be null.", im);

  List<IndexSpecification> indexList = new ArrayList<IndexSpecification>(1);
  IndexSpecification iSpec = new IndexSpecification("index_name");

  iSpec.addIndexColumn(new HColumnDescriptor("cf"), "cq", null, 10);
  indexList.add(iSpec);
  im.removeIndices("index_name");
  indexList = im.getIndicesForTable("index_name");
  assertNull("Index specification List should be null.", indexList);
}
 
Developer: tenggyut, Project: HIndex, Lines: 16, Source: TestIndexManager.java

Example 6: testPutWithValueLengthMoreThanMaxValueLength

import org.apache.hadoop.hbase.index.IndexSpecification; // import the package/class the method depends on
@Test(timeout = 180000)
public void testPutWithValueLengthMoreThanMaxValueLength() throws IOException, KeeperException,
    InterruptedException {
  Configuration conf = UTIL.getConfiguration();
  String userTableName = "testPutWithValueLengthMoreThanMaxValueLength";
  HTableDescriptor ihtd = new HTableDescriptor(TableName.valueOf(userTableName));
  HColumnDescriptor hcd = new HColumnDescriptor("col");
  IndexSpecification iSpec1 = new IndexSpecification("Index1");
  iSpec1.addIndexColumn(hcd, "ql1", ValueType.String, 10);
  ihtd.addFamily(hcd);
  TableIndices indices = new TableIndices();
  indices.addIndex(iSpec1);
  ihtd.setValue(Constants.INDEX_SPEC_KEY, indices.toByteArray());
  admin.createTable(ihtd);

  HTable table = new HTable(conf, userTableName);
  table.setAutoFlush(false, false);
  List<Put> putList = new ArrayList<Put>(3);
  putList.add(new Put("row1".getBytes()).add("col".getBytes(), "ql1".getBytes(),
    "valueLengthMoreThanMaxValueLength".getBytes()));
  putList.add(new Put("row2".getBytes()).add("col".getBytes(), "ql1".getBytes(),
    "myValue".getBytes()));
  putList.add(new Put("row3".getBytes()).add("col".getBytes(), "ql1".getBytes(),
    "myValue".getBytes()));
  table.put(putList);
  try {
    table.flushCommits();
  } catch (RetriesExhaustedWithDetailsException e) {
    // nothing to do.
  }
  Assert.assertEquals(1, table.getWriteBuffer().size());
}
 
Developer: tenggyut, Project: HIndex, Lines: 33, Source: TestIndexRegionObserver.java

Example 7: testPreCreateShouldNotBeSuccessfulIfIndicesAreNotSameAtLength

import org.apache.hadoop.hbase.index.IndexSpecification; // import the package/class the method depends on
@Test(timeout = 180000)
public void testPreCreateShouldNotBeSuccessfulIfIndicesAreNotSameAtLength() throws IOException,
    KeeperException, InterruptedException {
  ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(UTIL);
  String userTableName = "testNotConsisIndex2";
  HTableDescriptor ihtd = new HTableDescriptor(TableName.valueOf(userTableName));
  HColumnDescriptor hcd = new HColumnDescriptor("col");
  IndexSpecification iSpec1 = new IndexSpecification("Index1");
  iSpec1.addIndexColumn(hcd, "q1", ValueType.String, 10);
  iSpec1.addIndexColumn(hcd, "q2", ValueType.String, 4);
  ihtd.addFamily(hcd);
  TableIndices indices = new TableIndices();
  indices.addIndex(iSpec1);
  IndexSpecification iSpec2 = new IndexSpecification("Index2");
  iSpec2.addIndexColumn(hcd, "q3", ValueType.String, 10);
  iSpec2.addIndexColumn(hcd, "q2", ValueType.String, 10);
  indices.addIndex(iSpec2);
  ihtd.setValue(Constants.INDEX_SPEC_KEY, indices.toByteArray());
  boolean returnVal = false;
  try {
    admin.createTable(ihtd);
    fail("Exception should be thrown");
  } catch (IOException e) {
    returnVal = true;
  }
  Assert.assertTrue(returnVal);
  ZKAssign.blockUntilNoRIT(zkw);
}
 
Developer: tenggyut, Project: HIndex, Lines: 29, Source: TestIndexMasterObserver.java

Example 8: testSequenceOfIndexPutsWithDataTypes

import org.apache.hadoop.hbase.index.IndexSpecification; // import the package/class the method depends on
@Test(timeout = 180000)
public void testSequenceOfIndexPutsWithDataTypes() throws IOException {
  Path basedir = new Path(DIR + "TestIndexPut");
  Configuration conf = TEST_UTIL.getConfiguration();
  HTableDescriptor htd =
      new HTableDescriptor(TableName.valueOf("testSequenceOfIndexPutsWithDataTypes"));
  HRegionInfo info = new HRegionInfo(htd.getTableName(), "ABC".getBytes(), "BBB".getBytes(), false);
  HRegion region = HRegion.createHRegion(info, basedir, conf, htd);
  IndexSpecification spec = new IndexSpecification("index");
  spec.addIndexColumn(new HColumnDescriptor("col"), "ql1", ValueType.Int, 4);

  byte[] value1 = Bytes.toBytes(1000);
  Put p = new Put("row".getBytes());
  p.add("col".getBytes(), "ql1".getBytes(), value1);
  Put indexPut = IndexUtils.prepareIndexPut(p, spec, region);
  int a = 1000;
  byte[] expectedResult = Bytes.toBytes(a ^ (1 << 31));
  byte[] actualResult = new byte[4];
  byte[] indexRowKey = indexPut.getRow();
  System.arraycopy(indexRowKey, 22, actualResult, 0, actualResult.length);
  Assert.assertTrue(Bytes.equals(expectedResult, actualResult));

  value1 = Bytes.toBytes(-2562351);
  p = new Put("row".getBytes());
  p.add("col".getBytes(), "ql1".getBytes(), value1);
  Put indexPut1 = IndexUtils.prepareIndexPut(p, spec, region);
  a = -2562351;
  byte[] expectedResult1 = Bytes.toBytes(a ^ (1 << 31));
  byte[] actualResult1 = new byte[4];
  byte[] indexRowKey1 = indexPut1.getRow();
  System.arraycopy(indexRowKey1, 22, actualResult1, 0, actualResult1.length);
  Assert.assertTrue(Bytes.equals(expectedResult1, actualResult1));

  Assert.assertTrue(Bytes.compareTo(indexPut.getRow(), indexPut1.getRow()) > 0);
}
 
Developer: tenggyut, Project: HIndex, Lines: 36, Source: TestExtendedPutOps.java

Example 9: createIndexSpecification

import org.apache.hadoop.hbase.index.IndexSpecification; // import the package/class the method depends on
private IndexSpecification createIndexSpecification(HColumnDescriptor hcd, ValueType type,
    int maxValueLength, String[] qualifiers, String name) {
  IndexSpecification index = new IndexSpecification(name.getBytes());
  for (String qualifier : qualifiers) {
    index.addIndexColumn(hcd, qualifier, type, maxValueLength);
  }
  return index;
}
 
Developer: tenggyut, Project: HIndex, Lines: 9, Source: TestMultipleIndicesInScan.java

Example 10: testIndexPutWithValueGreaterThanLength

import org.apache.hadoop.hbase.index.IndexSpecification; // import the package/class the method depends on
@Test(timeout = 180000)
public void testIndexPutWithValueGreaterThanLength() throws IOException {
  String DIR = UTIL.getDataTestDir("TestStore").toString();
  Path basedir = new Path(DIR + "TestIndexPut");
  // Path logdir = new Path(DIR+"TestIndexPut"+"/logs");
  FileSystem fs = UTIL.getTestFileSystem();
  Configuration conf = UTIL.getConfiguration();
  HTableDescriptor htd = new HTableDescriptor("TestIndexPut");
  HRegionInfo info = new HRegionInfo(htd.getTableName(), "A".getBytes(), "B".getBytes(), false);
  HLog hlog = UTIL.getMiniHBaseCluster().getRegionServer(0).getWAL();
  HRegion region = new HRegion(basedir, hlog, fs, conf, info, htd, null);
  IndexSpecification spec = new IndexSpecification("testSpec");
  spec.addIndexColumn(new HColumnDescriptor("cf1"), "ql1", ValueType.String, 10);
  spec.addIndexColumn(new HColumnDescriptor("cf2"), "ql1", ValueType.String, 10);

  // assert IOException when value length goes beyond the limit.
  byte[] rowKey = "Arow1".getBytes();
  Put p = new Put(rowKey);
  long time = 1234567;
  boolean returnVal = false;
  try {
    p.add("cf1".getBytes(), "ql1".getBytes(), time, "testvalue11".getBytes());
    IndexUtils.prepareIndexPut(p, spec, region);
  } catch (IOException e) {
    returnVal = true;
  }
  Assert.assertTrue(returnVal);
}
 
Developer: tenggyut, Project: HIndex, Lines: 29, Source: TestIndexRegionObserver.java

Example 11: testScanMultipleIdxWithSameColFamilyAndDifferentQualifierShouldBeSuccessful

import org.apache.hadoop.hbase.index.IndexSpecification; // import the package/class the method depends on
@Test(timeout = 180000)
public void testScanMultipleIdxWithSameColFamilyAndDifferentQualifierShouldBeSuccessful()
    throws Exception {

  Configuration conf = UTIL.getConfiguration();
  String userTableName = "testScanWithMultIndexedSameColFamilyColumn";
  HTableDescriptor ihtd = new HTableDescriptor(TableName.valueOf(userTableName));
  HColumnDescriptor hcd1 = new HColumnDescriptor("col1");
  ihtd.addFamily(hcd1);
  IndexSpecification idx1 = new IndexSpecification("ScanMulIndex");
  idx1.addIndexColumn(hcd1, "ql", ValueType.String, 10);
  idx1.addIndexColumn(hcd1, "q2", ValueType.String, 10);
  TableIndices indices = new TableIndices();
  indices.addIndex(idx1);
  ihtd.setValue(Constants.INDEX_SPEC_KEY, indices.toByteArray());
  admin.createTable(ihtd);
  HTable table = new HTable(conf, userTableName);

  // test put with the indexed column
  Put p1 = new Put("row1".getBytes());
  p1.add("col1".getBytes(), "ql".getBytes(), "cat".getBytes());
  p1.add("col1".getBytes(), "q2".getBytes(), "dog".getBytes());
  table.put(p1);

  Put p2 = new Put("row2".getBytes());
  p2.add("col1".getBytes(), "ql".getBytes(), "dog".getBytes());
  p2.add("col1".getBytes(), "q2".getBytes(), "cat".getBytes());
  table.put(p2);

  Put p3 = new Put("row3".getBytes());
  p3.add("col1".getBytes(), "ql".getBytes(), "cat".getBytes());
  p3.add("col1".getBytes(), "q2".getBytes(), "dog".getBytes());
  table.put(p3);

  int i = 0;
  Scan s = new Scan();
  FilterList filterList = new FilterList();
  // check for combination of cat in q1 and dog in q2
  SingleColumnValueFilter filter1 =
      new SingleColumnValueFilter("col1".getBytes(), "ql".getBytes(), CompareOp.EQUAL,
          "cat".getBytes());
  filter1.setFilterIfMissing(true);
  SingleColumnValueFilter filter2 =
      new SingleColumnValueFilter("col1".getBytes(), "q2".getBytes(), CompareOp.EQUAL,
          "dog".getBytes());
  filter2.setFilterIfMissing(true);
  filterList.addFilter(filter1);

  filterList.addFilter(filter2);
  s.setFilter(filterList);

  ResultScanner scanner = table.getScanner(s);
  for (Result result : scanner) {
    i++;
  }

  Assert.assertEquals("Should match for 2 rows in multiple index successfully ", 2, i);
  Assert.assertTrue("Indexed table should be used ", IndexRegionObserver.getIndexedFlowUsed());
  Assert.assertTrue("Seek points should be added ", IndexRegionObserver.getSeekpointAdded());

}
 
Developer: tenggyut, Project: HIndex, Lines: 62, Source: TestIndexRegionObserverForScan.java

Example 12: testSequenceOfIndexPutsWithNegativeInteger

import org.apache.hadoop.hbase.index.IndexSpecification; // import the package/class the method depends on
@Test(timeout = 180000)
public void testSequenceOfIndexPutsWithNegativeInteger() throws IOException {
  Path basedir = new Path(DIR + "TestIndexPut");
  Configuration conf = TEST_UTIL.getConfiguration();
  HTableDescriptor htd =
      new HTableDescriptor(TableName.valueOf("testSequenceOfIndexPutsWithDataTypes"));
  HRegionInfo info = new HRegionInfo(htd.getTableName(), "ABC".getBytes(), "BBB".getBytes(), false);
  HRegion region = HRegion.createHRegion(info, basedir, conf, htd);
  IndexSpecification spec = new IndexSpecification("index");
  spec.addIndexColumn(new HColumnDescriptor("col"), "ql1", ValueType.Int, 4);

  byte[] value1 = Bytes.toBytes(-1000);
  Put p = new Put("row".getBytes());
  p.add("col".getBytes(), "ql1".getBytes(), value1);
  Put indexPut = IndexUtils.prepareIndexPut(p, spec, region);
  int a = -1000;
  byte[] expectedResult = Bytes.toBytes(a ^ (1 << 31));
  byte[] actualResult = new byte[4];
  byte[] indexRowKey = indexPut.getRow();
  System.arraycopy(indexRowKey, 22, actualResult, 0, actualResult.length);
  Assert.assertTrue(Bytes.equals(expectedResult, actualResult));

  value1 = Bytes.toBytes(-1500);
  p = new Put("row".getBytes());
  p.add("col".getBytes(), "ql1".getBytes(), value1);
  Put indexPut1 = IndexUtils.prepareIndexPut(p, spec, region);
  a = -1500;
  byte[] expectedResult1 = Bytes.toBytes(a ^ (1 << 31));
  byte[] actualResult1 = new byte[4];
  byte[] indexRowKey1 = indexPut1.getRow();
  System.arraycopy(indexRowKey1, 22, actualResult1, 0, actualResult1.length);
  Assert.assertTrue(Bytes.equals(expectedResult1, actualResult1));

  Assert.assertTrue(Bytes.compareTo(indexPut.getRow(), indexPut1.getRow()) > 0);

  value1 = Bytes.toBytes(1500);
  p = new Put("row".getBytes());
  p.add("col".getBytes(), "ql1".getBytes(), value1);
  Put indexPut2 = IndexUtils.prepareIndexPut(p, spec, region);
  a = 1500;
  byte[] expectedResult2 = Bytes.toBytes(a ^ (1 << 31));
  byte[] actualResult2 = new byte[4];
  byte[] indexRowKey2 = indexPut2.getRow();
  System.arraycopy(indexRowKey2, 22, actualResult2, 0, actualResult2.length);
  Assert.assertTrue(Bytes.equals(expectedResult2, actualResult2));

  Assert.assertTrue(Bytes.compareTo(indexPut2.getRow(), indexPut.getRow()) > 0);

  value1 = Bytes.toBytes(2000);
  p = new Put("row".getBytes());
  p.add("col".getBytes(), "ql1".getBytes(), value1);
  Put indexPut3 = IndexUtils.prepareIndexPut(p, spec, region);
  a = 2000;
  byte[] expectedResult3 = Bytes.toBytes(a ^ (1 << 31));
  byte[] actualResult3 = new byte[4];
  byte[] indexRowKey3 = indexPut3.getRow();
  System.arraycopy(indexRowKey3, 22, actualResult3, 0, actualResult3.length);
  Assert.assertTrue(Bytes.equals(expectedResult3, actualResult3));

  Assert.assertTrue(Bytes.compareTo(indexPut3.getRow(), indexPut2.getRow()) > 0);
}
 
Developer: tenggyut, Project: HIndex, Lines: 62, Source: TestExtendedPutOps.java
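The expression a ^ (1 << 31) in Examples 8 and 12 flips the sign bit before the value is laid into the row key, which is what makes unsigned byte-by-byte comparison of index row keys agree with signed int order. Below is a small standalone check of that property; it is not part of the HIndex sources, and the Bytes.compareTo-style unsigned comparison is reimplemented inline to keep the snippet self-contained:

public class SignFlipOrderCheck {
  // Big-endian encoding of an int with its sign bit flipped, matching what
  // the index row key stores for ValueType.Int columns in the tests above.
  static byte[] encode(int a) {
    int flipped = a ^ (1 << 31);
    return new byte[] { (byte) (flipped >>> 24), (byte) (flipped >>> 16),
        (byte) (flipped >>> 8), (byte) flipped };
  }

  // Unsigned lexicographic comparison, the order HBase row keys sort in.
  static int compareUnsigned(byte[] x, byte[] y) {
    for (int i = 0; i < x.length; i++) {
      int d = (x[i] & 0xff) - (y[i] & 0xff);
      if (d != 0) return d;
    }
    return 0;
  }

  public static void main(String[] args) {
    // Values taken from the two examples above, in ascending int order.
    int[] vals = { -2562351, -1500, -1000, 1000, 1500, 2000 };
    for (int i = 1; i < vals.length; i++) {
      // Each encoded key must sort strictly after the previous, smaller int.
      if (compareUnsigned(encode(vals[i]), encode(vals[i - 1])) <= 0) {
        throw new AssertionError("ordering broken at " + vals[i]);
      }
    }
    System.out.println("sign-flip encoding preserves int ordering: OK");
  }
}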

Example 13: testCheckAndPutFor1PutShouldHav2PutsInIndexTableAndShouldReplaceWithNewValue

import org.apache.hadoop.hbase.index.IndexSpecification; // import the package/class the method depends on
@Test(timeout = 180000)
public void testCheckAndPutFor1PutShouldHav2PutsInIndexTableAndShouldReplaceWithNewValue()
    throws Exception {
  Configuration conf = UTIL.getConfiguration();
  String userTableName = "testCheckAndPutContainingTheIndexedColumn";
  HTableDescriptor ihtd = new HTableDescriptor(TableName.valueOf(userTableName));
  HColumnDescriptor hcd = new HColumnDescriptor("col");
  IndexSpecification iSpec = new IndexSpecification("Index1");
  iSpec.addIndexColumn(hcd, "q1", ValueType.String, 10);
  ihtd.addFamily(hcd);
  TableIndices indices = new TableIndices();
  indices.addIndex(iSpec);
  ihtd.setValue(Constants.INDEX_SPEC_KEY, indices.toByteArray());
  admin.createTable(ihtd);
  String idxTableName = userTableName + Constants.INDEX_TABLE_SUFFIX;
  HTable table = new HTable(conf, userTableName);
  // test put with the indexed column
  Put p = new Put("row1".getBytes());
  p.add("col".getBytes(), "q1".getBytes(), "myValue".getBytes());
  table.put(p);

  int usertableCount = countNumberOfRows(userTableName);
  Assert.assertEquals(1, usertableCount);
  int idxtableCount = countNumberOfRows(idxTableName);
  Assert.assertEquals(1, idxtableCount);

  // Test check and put
  Put p1 = new Put("row1".getBytes());
  p1.add("col".getBytes(), "q1".getBytes(), "myNewValue".getBytes());
  Assert.assertTrue(table.checkAndPut("row1".getBytes(), "col".getBytes(), "q1".getBytes(),
    "myValue".getBytes(), p1));
  usertableCount = countNumberOfRows(userTableName);
  Assert.assertEquals(1, usertableCount);
  idxtableCount = countNumberOfRows(idxTableName);
  Assert.assertEquals(2, idxtableCount);

  Get get = new Get("row1".getBytes());
  get.addColumn(Bytes.toBytes("col"), Bytes.toBytes("q1"));
  Result result = table.get(get);
  byte[] val = result.getValue(Bytes.toBytes("col"), Bytes.toBytes("q1"));
  Assert.assertEquals("myNewValue", Bytes.toString(val));
}
 
Developer: tenggyut, Project: HIndex, Lines: 43, Source: TestIndexRegionObserver.java

Example 14: testSplittingIndexRegionExplicitly

import org.apache.hadoop.hbase.index.IndexSpecification; // import the package/class the method depends on
@Test(timeout = 180000)
public void testSplittingIndexRegionExplicitly() throws Exception {
  Configuration conf = UTIL.getConfiguration();
  conf.setInt("hbase.regionserver.lease.period", 900000000);
  conf.setBoolean("hbase.use.secondary.index", true);
  String userTableName = "testSplitTransaction";
  String indexTableName = "testSplitTransaction_idx";
  HTableDescriptor ihtd = new HTableDescriptor(TableName.valueOf(userTableName));
  HColumnDescriptor hcd = new HColumnDescriptor("col");
  IndexSpecification iSpec = new IndexSpecification("ScanIndexf");
  iSpec.addIndexColumn(new HColumnDescriptor("col"), "ql", ValueType.String, 10);
  ihtd.addFamily(hcd);
  TableIndices indices = new TableIndices();
  indices.addIndex(iSpec);
  ihtd.setValue(Constants.INDEX_SPEC_KEY, indices.toByteArray());
  admin.createTable(ihtd);
  TableName userTable = TableName.valueOf(userTableName);
  TableName indexTable = TableName.valueOf(IndexUtils.getIndexTableName(userTableName));
  HTable table = new HTable(conf, userTableName);

  for (int i = 0; i < 10; i++) {
    String row = "row" + i;
    Put p = new Put(row.getBytes());
    String val = "Val" + i;
    p.add("col".getBytes(), "ql".getBytes(), val.getBytes());
    table.put(p);
  }

  List<HRegionInfo> regionsOfUserTable =
      UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager()
          .getRegionStates().getRegionsOfTable(userTable);

  List<HRegionInfo> regionsOfIndexTable =
      UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager()
          .getRegionStates().getRegionsOfTable(indexTable);

  // try splitting index.
  admin.split(indexTableName.getBytes());
  Thread.sleep(2000);
  regionsOfIndexTable =
      UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager()
          .getRegionStates().getRegionsOfTable(indexTable);
  Assert.assertEquals("Index table should not get splited", 1, regionsOfIndexTable.size());

  // try splitting the user region.
  admin.split(userTableName.getBytes(), "row5".getBytes());
  while (regionsOfUserTable.size() != 2) {
    Thread.sleep(2000);
    regionsOfUserTable =
        UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager()
            .getRegionStates().getRegionsOfTable(userTable);
  }
  while (regionsOfIndexTable.size() != 2) {
    Thread.sleep(2000);
    regionsOfIndexTable =
        UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager()
            .getRegionStates().getRegionsOfTable(indexTable);
  }
  Assert.assertEquals(2, regionsOfUserTable.size());
  Assert.assertEquals(2, regionsOfIndexTable.size());
}
 
Developer: tenggyut, Project: HIndex, Lines: 62, Source: TestIndexRegionObserver.java

Example 15: testComplexRangeScanWithAnd

import org.apache.hadoop.hbase.index.IndexSpecification; // import the package/class the method depends on
@Test(timeout = 180000)
public void testComplexRangeScanWithAnd() throws Exception {
  Configuration conf = UTIL.getConfiguration();
  String tableName = "RangeScanMetrix_2_new_id";
  IndexSpecification spec1 = new IndexSpecification("idx1");
  IndexSpecification spec2 = new IndexSpecification("idx2");
  IndexSpecification spec3 = new IndexSpecification("idx3");
  HTableDescriptor htd = new HTableDescriptor(tableName);
  // HTableDescriptor htd = new HTableDescriptor(tableName);
  HColumnDescriptor hcd = new HColumnDescriptor("cf");
  spec1.addIndexColumn(hcd, "detail", ValueType.String, 10);
  spec2.addIndexColumn(hcd, "info", ValueType.String, 10);
  spec3.addIndexColumn(hcd, "value", ValueType.String, 10);
  htd.addFamily(hcd);
  TableIndices indices = new TableIndices();
  indices.addIndex(spec1);
  indices.addIndex(spec2);
  indices.addIndex(spec3);
  String[] splitkeys = new String[9];

  for (int i = 100, j = 0; i <= 900; i += 100, j++) {
    splitkeys[j] = new Integer(i).toString();
  }
  htd.setValue(Constants.INDEX_SPEC_KEY, indices.toByteArray());
  admin.createTable(htd, Bytes.toByteArrays(splitkeys));
  String rowname = "row";
  String startrow = "";
  int keys = 0;
  List<Put> put = new ArrayList<Put>();
  for (int i = 1, j = 999; i < 1000; i++, j--) {
    if (i % 100 == 0) {
      startrow = splitkeys[keys++];
    }
    Put p = new Put(Bytes.toBytes(startrow + rowname + i));
    p.add(Bytes.toBytes("cf"), Bytes.toBytes("detail"), Bytes.toBytes(new Integer(i).toString()));
    p.add(Bytes.toBytes("cf"), Bytes.toBytes("info"), Bytes.toBytes(new Integer(j).toString()));
    p.add(Bytes.toBytes("cf"), Bytes.toBytes("value"),
      Bytes.toBytes(new Integer(i % 100).toString()));
    System.out.println(p);
    put.add(p);
  }
  HTable table = new HTable(conf, tableName);
  table.put(put);

  Scan s = new Scan();
  s.setCacheBlocks(true);
  s.setCaching(1);
  FilterList master = new FilterList(Operator.MUST_PASS_ALL);
  SingleColumnValueFilter filter1 =
      new SingleColumnValueFilter("cf".getBytes(), "detail".getBytes(), CompareOp.LESS_OR_EQUAL,
          "65".getBytes());
  filter1.setFilterIfMissing(true);
  SingleColumnValueFilter filter2 =
      new SingleColumnValueFilter("cf".getBytes(), "info".getBytes(), CompareOp.GREATER,
          "900".getBytes());
  filter2.setFilterIfMissing(true);
  SingleColumnValueFilter filter3 =
      new SingleColumnValueFilter("cf".getBytes(), "value".getBytes(),
          CompareOp.GREATER_OR_EQUAL, "5".getBytes());
  filter3.setFilterIfMissing(true);
  master.addFilter(filter1);
  master.addFilter(filter2);
  master.addFilter(filter3);
  s.setFilter(master);
  // scanOperation(s, conf, tableName);
  assertEquals("data consistency is missed ", 18, scanOperation(s, conf, tableName));
  System.out.println("Done ************");
  s = new Scan();
  s.setFilter(master);
  s.setCaching(5);
  // scanOperation(s, conf, tableName);
  assertEquals("data consistency is missed ", 18, scanOperation(s, conf, tableName));
}
 
Developer: tenggyut, Project: HIndex, Lines: 74, Source: TestMultipleIndicesInScan.java


Note: The org.apache.hadoop.hbase.index.IndexSpecification.addIndexColumn method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Refer to each project's License before redistributing or using the code; do not reproduce without permission.