

Java Table.delete Method Code Examples

This article compiles typical usage examples of the Java method org.apache.hadoop.hbase.client.Table.delete. If you are wondering how to use Table.delete, what it does, or what calling it looks like in practice, the curated examples below may help. You can also explore other usage examples of the org.apache.hadoop.hbase.client.Table class to which this method belongs.


The following shows 15 code examples of Table.delete, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
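Before the examples, here is a minimal, self-contained sketch of a typical Table.delete call. The table name "demo_table", column family "cf", and row keys are placeholder assumptions, and the sketch assumes an HBase cluster reachable through the default client configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TableDeleteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("demo_table"))) { // placeholder table name
      // Delete an entire row.
      table.delete(new Delete(Bytes.toBytes("row1")));

      // Delete all versions of a single column in another row.
      Delete oneColumn = new Delete(Bytes.toBytes("row2"));
      oneColumn.addColumns(Bytes.toBytes("cf"), Bytes.toBytes("q1"));
      table.delete(oneColumn);
    }
  }
}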

Example 1: deleteAllTs

import org.apache.hadoop.hbase.client.Table; // import the dependent package/class
@Override
public void deleteAllTs(ByteBuffer tableName,
                        ByteBuffer row,
                        ByteBuffer column,
    long timestamp, Map<ByteBuffer, ByteBuffer> attributes) throws IOError {
  Table table = null;
  try {
    table = getTable(tableName);
    Delete delete  = new Delete(getBytes(row));
    addAttributes(delete, attributes);
    byte [][] famAndQf = KeyValue.parseColumn(getBytes(column));
    if (famAndQf.length == 1) {
      delete.deleteFamily(famAndQf[0], timestamp);
    } else {
      delete.deleteColumns(famAndQf[0], famAndQf[1], timestamp);
    }
    table.delete(delete);

  } catch (IOException e) {
    LOG.warn(e.getMessage(), e);
    throw new IOError(Throwables.getStackTraceAsString(e));
  } finally {
    closeTable(table);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 26, Source: ThriftServerRunner.java

Example 2: deleteAllRowTs

import org.apache.hadoop.hbase.client.Table; // import the dependent package/class
@Override
public void deleteAllRowTs(
    ByteBuffer tableName, ByteBuffer row, long timestamp,
    Map<ByteBuffer, ByteBuffer> attributes) throws IOError {
  Table table = null;
  try {
    table = getTable(tableName);
    Delete delete  = new Delete(getBytes(row), timestamp);
    addAttributes(delete, attributes);
    table.delete(delete);
  } catch (IOException e) {
    LOG.warn(e.getMessage(), e);
    throw new IOError(Throwables.getStackTraceAsString(e));
  } finally {
    closeTable(table);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 18, Source: ThriftServerRunner.java

Example 3: deleteAndWait

import org.apache.hadoop.hbase.client.Table; // import the dependent package/class
private void deleteAndWait(byte[] row, Table source, Table... targets)
throws Exception {
  Delete del = new Delete(row);
  source.delete(del);

  Get get = new Get(row);
  for (int i = 0; i < NB_RETRIES; i++) {
    if (i==NB_RETRIES-1) {
      fail("Waited too much time for del replication");
    }
    boolean removedFromAll = true;
    for (Table target : targets) {
      Result res = target.get(get);
      if (res.size() >= 1) {
        LOG.info("Row not deleted");
        removedFromAll = false;
        break;
      }
    }
    if (removedFromAll) {
      break;
    } else {
      Thread.sleep(SLEEP_TIME);
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 27, Source: TestMultiSlaveReplication.java

Example 4: doDelete

import org.apache.hadoop.hbase.client.Table; // import the dependent package/class
/**
 * Does the delete using the passed tableInterface but leaves it open
 *
 * @param tableInterface
 * @param delete
 */
public static void doDelete(final Table tableInterface, final Delete delete) {
    try {
        tableInterface.delete(delete);
    } catch (final Exception e) {
        closeTable(tableInterface);
        throw new HBaseException(e.getMessage(), e);
    }
}
 
Developer ID: gchq, Project: stroom-stats, Lines of code: 15, Source: HBaseTable.java

Example 5: run

import org.apache.hadoop.hbase.client.Table; // import the dependent package/class
@Override
public int run(String[] args) throws Exception {
  if (args.length != 1) {
    System.out.println("Usage : " + Delete.class.getSimpleName() + " <node to delete>");
    return 0;
  }
  byte[] val = Bytes.toBytesBinary(args[0]);

  org.apache.hadoop.hbase.client.Delete delete
    = new org.apache.hadoop.hbase.client.Delete(val);

  Table table = new HTable(getConf(), getTableName(getConf()));
  table.delete(delete);
  table.close();

  System.out.println("Delete successful");
  return 0;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 19, Source: IntegrationTestBigLinkedList.java

Example 6: delRecord

import org.apache.hadoop.hbase.client.Table; // import the dependent package/class
/**
 * delete record
 */
public static void delRecord(String tableName, String rowKey)
        throws IOException {
    Table table = connection.getTable(TableName.valueOf(tableName));
    List<Delete> list = new ArrayList<>();
    Delete del = new Delete(rowKey.getBytes());
    list.add(del);
    table.delete(list);
    System.out.println("del record " + rowKey + " ok.");
}
 
Developer ID: yjp123456, Project: SparkDemo, Lines of code: 13, Source: HBaseTest.java

Example 7: deleteAllRows

import org.apache.hadoop.hbase.client.Table; // import the dependent package/class
private void deleteAllRows(final Table table) throws IOException {
    final Scan scan = new Scan();
    final List<Delete> deleteList = new ArrayList<>();
    final ResultScanner results = table.getScanner(scan);
    for (final Result result : results) {
        deleteList.add(new Delete(result.getRow()));
    }
    results.close();
    table.delete(deleteList);
}
 
Developer ID: gchq, Project: stroom-stats, Lines of code: 11, Source: StatisticsTestService.java

Example 8: deleteMultiple

import org.apache.hadoop.hbase.client.Table; // import the dependent package/class
@Override
public List<TDelete> deleteMultiple(ByteBuffer table, List<TDelete> deletes) throws TIOError,
    TException {
  Table htable = getTable(table);
  try {
    htable.delete(deletesFromThrift(deletes));
  } catch (IOException e) {
    throw getTIOError(e);
  } finally {
    closeTable(htable);
  }
  return Collections.emptyList();
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 14, Source: ThriftHBaseServiceHandler.java

Example 9: deleteAndWaitWithFamily

import org.apache.hadoop.hbase.client.Table; // import the dependent package/class
private void deleteAndWaitWithFamily(byte[] row, byte[] fam,
    Table source, Table... targets)
  throws Exception {
  Delete del = new Delete(row);
  del.deleteFamily(fam);
  source.delete(del);

  Get get = new Get(row);
  get.addFamily(fam);
  for (int i = 0; i < NB_RETRIES; i++) {
    if (i==NB_RETRIES-1) {
      fail("Waited too much time for del replication");
    }
    boolean removedFromAll = true;
    for (Table target : targets) {
      Result res = target.get(get);
      if (res.size() >= 1) {
        LOG.info("Row not deleted");
        removedFromAll = false;
        break;
      }
    }
    if (removedFromAll) {
      break;
    } else {
      Thread.sleep(SLEEP_TIME);
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 30, Source: TestPerTableCFReplication.java

Example 10: fillTable

import org.apache.hadoop.hbase.client.Table; // import the dependent package/class
private void fillTable() throws IOException, InterruptedException {
  Table table = TEST_UTIL.createTable(TABLE_NAME, FAMILIES, 3,
      Bytes.toBytes("row0"), Bytes.toBytes("row99"), NUM_RS);
  Random rand = new Random(19387129L);
  for (int iStoreFile = 0; iStoreFile < 4; ++iStoreFile) {
    for (int iRow = 0; iRow < 100; ++iRow) {
      final byte[] row = Bytes.toBytes("row" + iRow);
      Put put = new Put(row);
      Delete del = new Delete(row);
      for (int iCol = 0; iCol < 10; ++iCol) {
        final byte[] cf = rand.nextBoolean() ? CF1 : CF2;
        final long ts = Math.abs(rand.nextInt());
        final byte[] qual = Bytes.toBytes("col" + iCol);
        if (rand.nextBoolean()) {
          final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
              "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
              ts + "_random_" + rand.nextLong());
          put.add(cf, qual, ts, value);
        } else if (rand.nextDouble() < 0.8) {
          del.addColumn(cf, qual, ts);
        } else {
          del.addColumn(cf, qual, ts);
        }
      }
      table.put(put);
      table.delete(del);
    }
  }
  TEST_UTIL.waitUntilAllRegionsAssigned(TABLE_NAME);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 31, Source: TestWALFiltering.java

Example 11: deleteFromMetaTable

import org.apache.hadoop.hbase.client.Table; // import the dependent package/class
/**
 * Delete the passed <code>deletes</code> from the <code>hbase:meta</code> table.
 * @param connection connection we're using
 * @param deletes Deletes to add to hbase:meta. This list should support #remove.
 * @throws IOException
 */
public static void deleteFromMetaTable(final Connection connection, final List<Delete> deletes)
  throws IOException {
  Table t = getMetaHTable(connection);
  try {
    t.delete(deletes);
  } finally {
    t.close();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 16, Source: MetaTableAccessor.java

Example 12: checkRowAndDelete

import org.apache.hadoop.hbase.client.Table; // import the dependent package/class
private void checkRowAndDelete(Table t, byte[] row, int count) throws IOException {
  Get g = new Get(row);
  Result r = t.get(g);
  assertEquals(count, r.size());
  Delete d = new Delete(row);
  t.delete(d);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 8, Source: TestRegionObserverBypass.java

Example 13: doIndexDelete

import org.apache.hadoop.hbase.client.Table; // import the dependent package/class
private void doIndexDelete(final List<IndexDelete> deletes) throws IOException {
  Table temptable = null;
  for (IndexDelete delete : deletes) {
    for (Map.Entry<byte[], Delete> entry : delete.getDeletes().entrySet()) {
      temptable = indexTableMaps.get(entry.getKey());
      temptable.delete(entry.getValue());
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 10, Source: IndexTable.java

Example 14: deleteAndWait

import org.apache.hadoop.hbase.client.Table; // import the dependent package/class
private void deleteAndWait(byte[] row, Table source, Table target)
    throws Exception {
  Delete del = new Delete(row);
  source.delete(del);
  wait(row, target, true);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 7, Source: TestMasterReplication.java

Example 15: testIncompleteMetaTableReplicaInformation

import org.apache.hadoop.hbase.client.Table; // import the dependent package/class
public void testIncompleteMetaTableReplicaInformation() throws Exception {
  final TableName table = TableName.valueOf("fooTableTest1");
  final int numRegions = 3;
  final int numReplica = 2;
  try {
    // Create a table and let the meta table be updated with the location of the
    // region locations.
    HTableDescriptor desc = new HTableDescriptor(table);
    desc.setRegionReplication(numReplica);
    desc.addFamily(new HColumnDescriptor("family"));
    ADMIN.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), numRegions);
    TEST_UTIL.waitTableEnabled(table);
    Set<byte[]> tableRows = new HashSet<byte[]>();
    List<HRegionInfo> hris = MetaTableAccessor.getTableRegions(TEST_UTIL.getZooKeeperWatcher(),
      ADMIN.getConnection(), table);
    for (HRegionInfo hri : hris) {
      tableRows.add(hri.getRegionName());
    }
    ADMIN.disableTable(table);
    // now delete one replica info from all the rows
    // this is to make the meta appear to be only partially updated
    Table metaTable = new HTable(TableName.META_TABLE_NAME, ADMIN.getConnection());
    for (byte[] row : tableRows) {
      Delete deleteOneReplicaLocation = new Delete(row);
      deleteOneReplicaLocation.deleteColumns(HConstants.CATALOG_FAMILY,
        MetaTableAccessor.getServerColumn(1));
      deleteOneReplicaLocation.deleteColumns(HConstants.CATALOG_FAMILY,
        MetaTableAccessor.getSeqNumColumn(1));
      deleteOneReplicaLocation.deleteColumns(HConstants.CATALOG_FAMILY,
        MetaTableAccessor.getStartCodeColumn(1));
      metaTable.delete(deleteOneReplicaLocation);
    }
    metaTable.close();
    // even if the meta table is partly updated, when we re-enable the table, we should
    // get back the desired number of replicas for the regions
    ADMIN.enableTable(table);
    assert(ADMIN.isTableEnabled(table));
    List<HRegionInfo> regions = TEST_UTIL.getMiniHBaseCluster().getMaster()
        .getAssignmentManager().getRegionStates().getRegionsOfTable(table);
    assert(regions.size() == numRegions * numReplica);
  } finally {
    ADMIN.disableTable(table);
    ADMIN.deleteTable(table);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 46, Source: TestMasterOperationsForRegionReplicas.java


Note: The org.apache.hadoop.hbase.client.Table.delete method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers, and copyright remains with the original authors. Please consult the corresponding project's license before distributing or using them; do not reproduce without permission.