

Java Delete.deleteColumns Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.Delete.deleteColumns. If you are unsure what Delete.deleteColumns does, how to call it, or what real-world usage looks like, the curated code samples below should help. You can also explore further examples of the enclosing class, org.apache.hadoop.hbase.client.Delete.


The following presents 10 code examples of the Delete.deleteColumns method, collected from open-source projects and ordered by popularity by default.
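Before diving into the examples, a note on semantics: deleteColumns marks every version of a cell for deletion (optionally only versions at or below a given timestamp), whereas the singular deleteColumn marks only the most recent version. The minimal sketch below is my own illustration, not taken from the projects above; it assumes an HBase 1.x client on the classpath, a reachable cluster, and a hypothetical table "demo" with column family "cf". Note that deleteColumns was deprecated in HBase 1.x in favor of the equivalent Delete.addColumns and later removed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class DeleteColumnsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // "demo" and "cf" are hypothetical names used only for this sketch.
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("demo"))) {
      Delete delete = new Delete(Bytes.toBytes("row1"));
      // Mark all versions of cf:q1 for deletion.
      delete.deleteColumns(Bytes.toBytes("cf"), Bytes.toBytes("q1"));
      // Mark all versions of cf:q2 with timestamp <= 1000 for deletion.
      delete.deleteColumns(Bytes.toBytes("cf"), Bytes.toBytes("q2"), 1000L);
      table.delete(delete);
    }
  }
}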

Example 1: deleteAllTs

import org.apache.hadoop.hbase.client.Delete; // import the class the method depends on
@Override
public void deleteAllTs(ByteBuffer tableName,
                        ByteBuffer row,
                        ByteBuffer column,
    long timestamp, Map<ByteBuffer, ByteBuffer> attributes) throws IOError {
  Table table = null;
  try {
    table = getTable(tableName);
    Delete delete = new Delete(getBytes(row));
    addAttributes(delete, attributes);
    byte [][] famAndQf = KeyValue.parseColumn(getBytes(column));
    if (famAndQf.length == 1) {
      delete.deleteFamily(famAndQf[0], timestamp);
    } else {
      delete.deleteColumns(famAndQf[0], famAndQf[1], timestamp);
    }
    table.delete(delete);

  } catch (IOException e) {
    LOG.warn(e.getMessage(), e);
    throw new IOError(Throwables.getStackTraceAsString(e));
  } finally {
    closeTable(table);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 26, Source: ThriftServerRunner.java
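A side note on the famAndQf branch above (my own illustration, not from the project): KeyValue.parseColumn splits a Thrift-style column name on the ':' separator, so the array length tells the handler whether a qualifier was supplied.

byte[][] famOnly = KeyValue.parseColumn(Bytes.toBytes("cf"));
// No ':' in the input, so famOnly.length == 1 and the handler falls back
// to deleteFamily, removing the whole family at the given timestamp.
byte[][] famAndQual = KeyValue.parseColumn(Bytes.toBytes("cf:q1"));
// famAndQual.length == 2: famAndQual[0] is "cf", famAndQual[1] is "q1",
// so deleteColumns removes all versions of that single column at or
// below the given timestamp.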

Example 2: removeRegionReplicasFromMeta

import org.apache.hadoop.hbase.client.Delete; // import the class the method depends on
/**
 * Deletes the replica location columns (server, seqnum, and startcode) for a
 * range of replica IDs from the passed hbase:meta rows
 * @param metaRows rows in hbase:meta
 * @param replicaIndexToDeleteFrom the replica ID to start deleting from
 * @param numReplicasToRemove how many replicas to remove
 * @param connection connection we're using to access meta table
 * @throws IOException
 */
public static void removeRegionReplicasFromMeta(Set<byte[]> metaRows,
  int replicaIndexToDeleteFrom, int numReplicasToRemove, Connection connection)
    throws IOException {
  int absoluteIndex = replicaIndexToDeleteFrom + numReplicasToRemove;
  for (byte[] row : metaRows) {
    Delete deleteReplicaLocations = new Delete(row);
    for (int i = replicaIndexToDeleteFrom; i < absoluteIndex; i++) {
      deleteReplicaLocations.deleteColumns(HConstants.CATALOG_FAMILY,
        getServerColumn(i));
      deleteReplicaLocations.deleteColumns(HConstants.CATALOG_FAMILY,
        getSeqNumColumn(i));
      deleteReplicaLocations.deleteColumns(HConstants.CATALOG_FAMILY,
        getStartCodeColumn(i));
    }
    deleteFromMetaTable(connection, deleteReplicaLocations);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 26, Source: MetaTableAccessor.java
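As a concrete reading of the loop: with replicaIndexToDeleteFrom = 1 and numReplicasToRemove = 2, the loop runs for i = 1 and i = 2, so the server, seqnum, and startcode columns of replicas 1 and 2 are marked for deletion in every passed hbase:meta row.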

Example 3: testMinorCompactionWithDeleteColumn1

import org.apache.hadoop.hbase.client.Delete; // import the class the method depends on
@Test
public void testMinorCompactionWithDeleteColumn1() throws Exception {
  Delete dc = new Delete(secondRowBytes);
  /* delete all timestamps in the column */
  dc.deleteColumns(fam2, col2);
  testMinorCompactionWithDelete(dc);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 8, Source: TestMinorCompaction.java

Example 4: testMinorCompactionWithDeleteVersion1

import org.apache.hadoop.hbase.client.Delete; // import the class the method depends on
@Test
public void testMinorCompactionWithDeleteVersion1() throws Exception {
  Delete deleteVersion = new Delete(secondRowBytes);
  deleteVersion.deleteColumns(fam2, col2, 2);
  /* compactionThreshold is 3. The table has 4 versions: 0, 1, 2, and 3.
   * We delete versions 0 ... 2. So, we still have one remaining.
   */
  testMinorCompactionWithDelete(deleteVersion, 1);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 10, Source: TestMinorCompaction.java
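The timestamp argument is inclusive: deleteColumns(family, qualifier, ts) masks every version with a timestamp at or below ts and leaves newer versions visible, which is why the test above expects exactly one surviving version. The sketch below is my own illustration of that behavior; it assumes a Table handle obtained as in the earlier sketch and explicit client-supplied timestamps 0 through 3.

Put put = new Put(Bytes.toBytes("row1"));
for (long ts = 0; ts <= 3; ts++) {
  put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"), ts, Bytes.toBytes("v" + ts));
}
table.put(put);

Delete delete = new Delete(Bytes.toBytes("row1"));
delete.deleteColumns(Bytes.toBytes("cf"), Bytes.toBytes("q1"), 2); // masks ts 0, 1, 2
table.delete(delete);

Get get = new Get(Bytes.toBytes("row1"));
get.setMaxVersions(); // request all remaining versions
Result result = table.get(get);
// Only the ts = 3 version should come back, mirroring the "one remaining"
// comment in the test above.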

Example 5: deleteMergeQualifiers

import org.apache.hadoop.hbase.client.Delete; // import the class the method depends on
/**
 * Deletes merge qualifiers for the specified merged region.
 * @param connection connection we're using
 * @param mergedRegion the merged region whose merge qualifiers are to be removed
 * @throws IOException
 */
public static void deleteMergeQualifiers(Connection connection,
                                         final HRegionInfo mergedRegion) throws IOException {
  Delete delete = new Delete(mergedRegion.getRegionName());
  delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER);
  delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER);
  deleteFromMetaTable(connection, delete);
  LOG.info("Deleted references in merged region "
    + mergedRegion.getRegionNameAsString() + ", qualifier="
    + Bytes.toStringBinary(HConstants.MERGEA_QUALIFIER) + " and qualifier="
    + Bytes.toStringBinary(HConstants.MERGEB_QUALIFIER));
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 18, Source: MetaTableAccessor.java

Example 6: testIncompleteMetaTableReplicaInformation

import org.apache.hadoop.hbase.client.Delete; // import the class the method depends on
public void testIncompleteMetaTableReplicaInformation() throws Exception {
  final TableName table = TableName.valueOf("fooTableTest1");
  final int numRegions = 3;
  final int numReplica = 2;
  try {
    // Create a table and let the meta table be updated with the location of the
    // region locations.
    HTableDescriptor desc = new HTableDescriptor(table);
    desc.setRegionReplication(numReplica);
    desc.addFamily(new HColumnDescriptor("family"));
    ADMIN.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), numRegions);
    TEST_UTIL.waitTableEnabled(table);
    Set<byte[]> tableRows = new HashSet<byte[]>();
    List<HRegionInfo> hris = MetaTableAccessor.getTableRegions(TEST_UTIL.getZooKeeperWatcher(),
      ADMIN.getConnection(), table);
    for (HRegionInfo hri : hris) {
      tableRows.add(hri.getRegionName());
    }
    ADMIN.disableTable(table);
    // now delete one replica info from all the rows
    // this is to make the meta appear to be only partially updated
    Table metaTable = new HTable(TableName.META_TABLE_NAME, ADMIN.getConnection());
    for (byte[] row : tableRows) {
      Delete deleteOneReplicaLocation = new Delete(row);
      deleteOneReplicaLocation.deleteColumns(HConstants.CATALOG_FAMILY,
        MetaTableAccessor.getServerColumn(1));
      deleteOneReplicaLocation.deleteColumns(HConstants.CATALOG_FAMILY,
        MetaTableAccessor.getSeqNumColumn(1));
      deleteOneReplicaLocation.deleteColumns(HConstants.CATALOG_FAMILY,
        MetaTableAccessor.getStartCodeColumn(1));
      metaTable.delete(deleteOneReplicaLocation);
    }
    metaTable.close();
    // even if the meta table is partly updated, when we re-enable the table, we should
    // get back the desired number of replicas for the regions
    ADMIN.enableTable(table);
    assert(ADMIN.isTableEnabled(table));
    List<HRegionInfo> regions = TEST_UTIL.getMiniHBaseCluster().getMaster()
        .getAssignmentManager().getRegionStates().getRegionsOfTable(table);
    assert(regions.size() == numRegions * numReplica);
  } finally {
    ADMIN.disableTable(table);
    ADMIN.deleteTable(table);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 46, Source: TestMasterOperationsForRegionReplicas.java

Example 7: testWALPlayer

import org.apache.hadoop.hbase.client.Delete; // import the class the method depends on
/**
 * Simple end-to-end test
 * @throws Exception
 */
@Test
public void testWALPlayer() throws Exception {
  final TableName TABLENAME1 = TableName.valueOf("testWALPlayer1");
  final TableName TABLENAME2 = TableName.valueOf("testWALPlayer2");
  final byte[] FAMILY = Bytes.toBytes("family");
  final byte[] COLUMN1 = Bytes.toBytes("c1");
  final byte[] COLUMN2 = Bytes.toBytes("c2");
  final byte[] ROW = Bytes.toBytes("row");
  Table t1 = TEST_UTIL.createTable(TABLENAME1, FAMILY);
  Table t2 = TEST_UTIL.createTable(TABLENAME2, FAMILY);

  // put a row into the first table
  Put p = new Put(ROW);
  p.add(FAMILY, COLUMN1, COLUMN1);
  p.add(FAMILY, COLUMN2, COLUMN2);
  t1.put(p);
  // delete one column
  Delete d = new Delete(ROW);
  d.deleteColumns(FAMILY, COLUMN1);
  t1.delete(d);

  // replay the WAL, map table 1 to table 2
  WAL log = cluster.getRegionServer(0).getWAL(null);
  log.rollWriter();
  String walInputDir = new Path(cluster.getMaster().getMasterFileSystem()
      .getRootDir(), HConstants.HREGION_LOGDIR_NAME).toString();

  Configuration configuration = TEST_UTIL.getConfiguration();
  WALPlayer player = new WALPlayer(configuration);
  String optionName = "_test_.name";
  configuration.set(optionName, "1000");
  player.setupTime(configuration, optionName);
  assertEquals(1000, configuration.getLong(optionName, 0));
  assertEquals(0, player.run(new String[] {walInputDir, TABLENAME1.getNameAsString(),
      TABLENAME2.getNameAsString() }));

  
  // verify that the WAL was replayed into table 2
  Get g = new Get(ROW);
  Result r = t2.get(g);
  assertEquals(1, r.size());
  assertTrue(CellUtil.matchingQualifier(r.rawCells()[0], COLUMN2));
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 48, Source: TestWALPlayer.java

Example 8: testHbckWithExcessReplica

import org.apache.hadoop.hbase.client.Delete; // import the class the method depends on
@Test
public void testHbckWithExcessReplica() throws Exception {
  TableName table =
      TableName.valueOf("testHbckWithExcessReplica");
  try {
    setupTableWithRegionReplica(table, 2);
    TEST_UTIL.getHBaseAdmin().flush(table.getName());
    assertNoErrors(doFsck(conf, false));
    assertEquals(ROWKEYS.length, countRows());
    // the next few lines inject a location in meta for a replica and then
    // ask the master to assign the replica (the location needs to be injected
    // for the master to treat the assignment request as valid; the master
    // checks that the region is valid either from its memory or from meta)
    HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
    List<HRegionInfo> regions = TEST_UTIL.getHBaseAdmin().getTableRegions(table);
    byte[] startKey = Bytes.toBytes("B");
    byte[] endKey = Bytes.toBytes("C");
    byte[] metaKey = null;
    HRegionInfo newHri = null;
    for (HRegionInfo h : regions) {
      if (Bytes.compareTo(h.getStartKey(), startKey) == 0 &&
          Bytes.compareTo(h.getEndKey(), endKey) == 0 &&
          h.getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
        metaKey = h.getRegionName();
        //create a hri with replicaId as 2 (since we already have replicas with replicaid 0 and 1)
        newHri = RegionReplicaUtil.getRegionInfoForReplica(h, 2);
        break;
      }
    }
    Put put = new Put(metaKey);
    ServerName sn = TEST_UTIL.getHBaseAdmin().getClusterStatus().getServers()
        .toArray(new ServerName[0])[0];
    //add a location with replicaId as 2 (since we already have replicas with replicaid 0 and 1)
    MetaTableAccessor.addLocation(put, sn, sn.getStartcode(), -1, 2);
    meta.put(put);
    meta.flushCommits();
    // assign the new replica
    HBaseFsckRepair.fixUnassigned((HBaseAdmin)TEST_UTIL.getHBaseAdmin(), newHri);
    HBaseFsckRepair.waitUntilAssigned((HBaseAdmin)TEST_UTIL.getHBaseAdmin(), newHri);
    // now reset the meta row to its original value
    Delete delete = new Delete(metaKey);
    delete.deleteColumns(HConstants.CATALOG_FAMILY, MetaTableAccessor.getServerColumn(2));
    delete.deleteColumns(HConstants.CATALOG_FAMILY, MetaTableAccessor.getStartCodeColumn(2));
    delete.deleteColumns(HConstants.CATALOG_FAMILY, MetaTableAccessor.getSeqNumColumn(2));
    meta.delete(delete);
    meta.flushCommits();
    meta.close();
    // check that problem exists
    HBaseFsck hbck = doFsck(conf, false);
    assertErrors(hbck, new ERROR_CODE[]{ERROR_CODE.NOT_IN_META});
    // fix the problem
    hbck = doFsck(conf, true);
    // run hbck again to make sure we don't see any errors
    hbck = doFsck(conf, false);
    assertErrors(hbck, new ERROR_CODE[]{});
  } finally {
    cleanupTable(table);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 60, Source: TestHBaseFsck.java

Example 9: testDeleteColumns_PostInsert

import org.apache.hadoop.hbase.client.Delete; // import the class the method depends on
@Test
public void testDeleteColumns_PostInsert() throws IOException, InterruptedException {
  Delete delete = new Delete(row);
  delete.deleteColumns(fam1, qual1);
  doTestDelete_AndPostInsert(delete);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 7, Source: TestHRegion.java

Example 10: testScanAndConcurrentMajorCompact

import org.apache.hadoop.hbase.client.Delete; // import the class the method depends on
/**
 * Make sure scanner returns correct result when we run a major compaction
 * with deletes.
 *
 * @throws Exception
 */
@Test
@SuppressWarnings("deprecation")
public void testScanAndConcurrentMajorCompact() throws Exception {
  HTableDescriptor htd = TEST_UTIL.createTableDescriptor(name.getMethodName());
  this.r = TEST_UTIL.createLocalHRegion(htd, null, null);
  HRegionIncommon hri = new HRegionIncommon(r);

  try {
    HBaseTestCase.addContent(hri, Bytes.toString(fam1), Bytes.toString(col1),
        firstRowBytes, secondRowBytes);
    HBaseTestCase.addContent(hri, Bytes.toString(fam2), Bytes.toString(col1),
        firstRowBytes, secondRowBytes);

    Delete dc = new Delete(firstRowBytes);
    /* delete column1 of firstRow */
    dc.deleteColumns(fam1, col1);
    r.delete(dc);
    r.flush(true);

    HBaseTestCase.addContent(hri, Bytes.toString(fam1), Bytes.toString(col1),
        secondRowBytes, thirdRowBytes);
    HBaseTestCase.addContent(hri, Bytes.toString(fam2), Bytes.toString(col1),
        secondRowBytes, thirdRowBytes);
    r.flush(true);

    InternalScanner s = r.getScanner(new Scan());
    // run a major compact, column1 of firstRow will be cleaned.
    r.compact(true);

    List<Cell> results = new ArrayList<Cell>();
    s.next(results);

    // make sure returns column2 of firstRow
    assertTrue("result is not correct, keyValues : " + results,
        results.size() == 1);
    assertTrue(CellUtil.matchingRow(results.get(0), firstRowBytes)); 
    assertTrue(CellUtil.matchingFamily(results.get(0), fam2));

    results = new ArrayList<Cell>();
    s.next(results);

    // get secondRow
    assertTrue(results.size() == 2);
    assertTrue(CellUtil.matchingRow(results.get(0), secondRowBytes));
    assertTrue(CellUtil.matchingFamily(results.get(0), fam1));
    assertTrue(CellUtil.matchingFamily(results.get(1), fam2));
  } finally {
    HRegion.closeHRegion(this.r);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 57, Source: TestScanner.java


Note: The org.apache.hadoop.hbase.client.Delete.deleteColumns method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by the community; the source code remains under the copyright of its original authors, and distribution and use should follow each project's License. Do not reproduce without permission.