当前位置: 首页>>代码示例>>Java>>正文


Java HBaseAdmin.flush方法代码示例

本文整理汇总了Java中org.apache.hadoop.hbase.client.HBaseAdmin.flush方法的典型用法代码示例。如果您正苦于以下问题:Java HBaseAdmin.flush方法的具体用法?Java HBaseAdmin.flush怎么用?Java HBaseAdmin.flush使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hbase.client.HBaseAdmin的用法示例。


在下文中一共展示了HBaseAdmin.flush方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: prepareData

import org.apache.hadoop.hbase.client.HBaseAdmin; //导入方法依赖的package包/类
/**
 * Drops any pre-existing copy of {@code tableName}, recreates it, and loads
 * 100 rows of 128 KB random payloads, flushing after every tenth put so the
 * data is persisted across multiple store files.
 *
 * @return the store backing {@code tableName}
 * @throws IOException if a table admin call or a put fails
 */
private Store prepareData() throws IOException {
  HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
  HTable table = TEST_UTIL.createTable(tableName, family);
  Random random = new Random();
  for (int row = 0; row < 100; row++) {
    byte[] payload = new byte[128 * 1024];
    random.nextBytes(payload);
    table.put(new Put(Bytes.toBytes(row)).add(family, qualifier, payload));
    // Flush after every 10th row, matching ten flushes total.
    if ((row + 1) % 10 == 0) {
      admin.flush(tableName);
    }
  }
  return getStoreWithName(tableName);
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:19,代码来源:TestCompactionWithThroughputController.java

示例2: insertData

import org.apache.hadoop.hbase.client.HBaseAdmin; //导入方法依赖的package包/类
/**
 * Inserts four single-cell rows (row1..row4 with cf:q1 = "1".."4") into the
 * given table, then flushes the table so the data is written out to HFiles.
 *
 * @param tableName name of the table to flush after the puts
 * @param admin     admin used to issue the flush
 * @param t         table the rows are written to
 * @throws IOException          if a put or the flush fails
 * @throws InterruptedException declared for compatibility with callers
 */
private void insertData(final byte[] tableName, HBaseAdmin admin, HTable t) throws IOException,
    InterruptedException {
  byte[] family = Bytes.toBytes("cf");
  byte[] qualifier = Bytes.toBytes("q1");
  // Same four puts as before, de-duplicated into a loop: rowN -> "N".
  for (int i = 1; i <= 4; i++) {
    Put p = new Put(Bytes.toBytes("row" + i));
    p.add(family, qualifier, Bytes.toBytes(String.valueOf(i)));
    t.put(p);
  }
  admin.flush(tableName);
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:17,代码来源:TestSplitTransactionOnCluster.java

示例3: insertData

import org.apache.hadoop.hbase.client.HBaseAdmin; //导入方法依赖的package包/类
/**
 * Inserts four single-cell rows (row1..row4 with cf:q1 = "1".."4") into the
 * given table, then flushes the table so the data is written out to HFiles.
 *
 * @param tableName table to flush after the puts
 * @param admin     admin used to issue the flush
 * @param t         table the rows are written to
 * @throws IOException          if a put or the flush fails
 * @throws InterruptedException declared for compatibility with callers
 */
private void insertData(final TableName tableName, HBaseAdmin admin, Table t) throws IOException,
    InterruptedException {
  byte[] family = Bytes.toBytes("cf");
  byte[] qualifier = Bytes.toBytes("q1");
  // Same four puts as before, de-duplicated into a loop: rowN -> "N".
  for (int i = 1; i <= 4; i++) {
    Put p = new Put(Bytes.toBytes("row" + i));
    p.add(family, qualifier, Bytes.toBytes(String.valueOf(i)));
    t.put(p);
  }
  admin.flush(tableName);
}
 
开发者ID:grokcoder,项目名称:pbase,代码行数:17,代码来源:TestSplitTransactionOnCluster.java

示例4: perform

import org.apache.hadoop.hbase.client.HBaseAdmin; //导入方法依赖的package包/类
/**
 * Chaos action: selects a random region of the configured table and asks
 * the cluster to flush it. A failed flush is only logged, since other
 * concurrent chaos actions may legitimately race with this one.
 */
@Override
public void perform() throws Exception {
  HBaseTestingUtility testingUtil = context.getHBaseIntegrationTestingUtility();
  HBaseAdmin admin = testingUtil.getHBaseAdmin();

  LOG.info("Performing action: Flush random region of table " + tableName);
  List<HRegionInfo> candidates = admin.getTableRegions(tableNameBytes);
  if (candidates == null || candidates.isEmpty()) {
    LOG.info("Table " + tableName + " doesn't have regions to flush");
    return;
  }

  HRegionInfo[] candidateArray = candidates.toArray(new HRegionInfo[candidates.size()]);
  HRegionInfo victim = PolicyBasedChaosMonkey.selectRandomItem(candidateArray);
  LOG.debug("Flushing region " + victim.getRegionNameAsString());
  try {
    admin.flush(victim.getRegionName());
  } catch (Exception ex) {
    // Best-effort: tolerate flush failures caused by overlapping chaos.
    LOG.warn("Flush failed, might be caused by other chaos: " + ex.getMessage());
  }
  if (sleepTime > 0) {
    Thread.sleep(sleepTime);
  }
}
 
开发者ID:tenggyut,项目名称:HIndex,代码行数:25,代码来源:FlushRandomRegionOfTableAction.java

示例5: generateHBaseDatasetDoubleOB

import org.apache.hadoop.hbase.client.HBaseAdmin; //导入方法依赖的package包/类
/**
 * (Re)creates {@code tableName} and loads doubles 0.5, 1.25, ... up to 100.0
 * (step 0.75), each encoded as an ascending OrderedBytes float64 row key,
 * then flushes the table. Pre-splits when {@code numberRegions > 1}.
 */
public static void generateHBaseDatasetDoubleOB(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor tableDescriptor = new HTableDescriptor(tableName);
  tableDescriptor.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(tableDescriptor, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions - 1));
  } else {
    admin.createTable(tableDescriptor);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  for (double value = 0.5; value <= 100.00; value += 0.75) {
    byte[] rowKey = new byte[9];
    org.apache.hadoop.hbase.util.PositionedByteRange range =
        new org.apache.hadoop.hbase.util.SimplePositionedByteRange(rowKey, 0, 9);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeFloat64(range, value,
        org.apache.hadoop.hbase.util.Order.ASCENDING);
    Put put = new Put(rowKey);
    // NOTE(review): "%03f" looks like a typo for "%.3f", but the generated
    // values are consumed by other tests, so the format is kept as-is.
    put.add(FAMILY_F, COLUMN_C, String.format("value %03f", value).getBytes());
    table.put(put);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
 
开发者ID:skhalifa,项目名称:QDrill,代码行数:34,代码来源:TestTableGenerator.java

示例6: generateHBaseDatasetFloatOB

import org.apache.hadoop.hbase.client.HBaseAdmin; //导入方法依赖的package包/类
/**
 * (Re)creates {@code tableName} and loads floats 0.5, 1.25, ... up to 100.0
 * (step 0.75), each encoded as an ascending OrderedBytes float32 row key,
 * then flushes the table. Pre-splits when {@code numberRegions > 1}.
 */
public static void generateHBaseDatasetFloatOB(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor tableDescriptor = new HTableDescriptor(tableName);
  tableDescriptor.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(tableDescriptor, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions - 1));
  } else {
    admin.createTable(tableDescriptor);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  for (float value = (float) 0.5; value <= 100.00; value += 0.75) {
    byte[] rowKey = new byte[5];
    org.apache.hadoop.hbase.util.PositionedByteRange range =
        new org.apache.hadoop.hbase.util.SimplePositionedByteRange(rowKey, 0, 5);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeFloat32(range, value,
        org.apache.hadoop.hbase.util.Order.ASCENDING);
    Put put = new Put(rowKey);
    // NOTE(review): "%03f" looks like a typo for "%.3f"; kept for data parity.
    put.add(FAMILY_F, COLUMN_C, String.format("value %03f", value).getBytes());
    table.put(put);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
 
开发者ID:skhalifa,项目名称:QDrill,代码行数:34,代码来源:TestTableGenerator.java

示例7: generateHBaseDatasetBigIntOB

import org.apache.hadoop.hbase.client.HBaseAdmin; //导入方法依赖的package包/类
/**
 * (Re)creates {@code tableName} and loads 101 consecutive longs starting at
 * a fixed epoch-millis base, each encoded as an ascending OrderedBytes
 * int64 row key, then flushes the table. Pre-splits when
 * {@code numberRegions > 1}. (Also normalizes the original's broken
 * indentation.)
 */
public static void generateHBaseDatasetBigIntOB(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor tableDescriptor = new HTableDescriptor(tableName);
  tableDescriptor.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(tableDescriptor, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions - 1));
  } else {
    admin.createTable(tableDescriptor);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  long startTime = (long) 1438034423 * 1000;
  for (long value = startTime; value <= startTime + 100; value++) {
    byte[] rowKey = new byte[9];
    org.apache.hadoop.hbase.util.PositionedByteRange range =
        new org.apache.hadoop.hbase.util.SimplePositionedByteRange(rowKey, 0, 9);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeInt64(range, value,
        org.apache.hadoop.hbase.util.Order.ASCENDING);
    Put put = new Put(rowKey);
    put.add(FAMILY_F, COLUMN_C, String.format("value %d", value).getBytes());
    table.put(put);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
 
开发者ID:skhalifa,项目名称:QDrill,代码行数:34,代码来源:TestTableGenerator.java

示例8: generateHBaseDatasetIntOB

import org.apache.hadoop.hbase.client.HBaseAdmin; //导入方法依赖的package包/类
/**
 * (Re)creates {@code tableName} and loads the ints -49..100, each encoded
 * as an ascending OrderedBytes int32 row key, then flushes the table.
 * Pre-splits when {@code numberRegions > 1}.
 */
public static void generateHBaseDatasetIntOB(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor tableDescriptor = new HTableDescriptor(tableName);
  tableDescriptor.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(tableDescriptor, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions - 1));
  } else {
    admin.createTable(tableDescriptor);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  for (int value = -49; value <= 100; value++) {
    byte[] rowKey = new byte[5];
    org.apache.hadoop.hbase.util.PositionedByteRange range =
        new org.apache.hadoop.hbase.util.SimplePositionedByteRange(rowKey, 0, 5);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeInt32(range, value,
        org.apache.hadoop.hbase.util.Order.ASCENDING);
    Put put = new Put(rowKey);
    put.add(FAMILY_F, COLUMN_C, String.format("value %d", value).getBytes());
    table.put(put);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
 
开发者ID:skhalifa,项目名称:QDrill,代码行数:34,代码来源:TestTableGenerator.java

示例9: generateHBaseDatasetDoubleOBDesc

import org.apache.hadoop.hbase.client.HBaseAdmin; //导入方法依赖的package包/类
/**
 * (Re)creates {@code tableName} and loads doubles 0.5, 1.25, ... up to 100.0
 * (step 0.75), each encoded as a DESCENDING OrderedBytes float64 row key,
 * then flushes the table. Pre-splits when {@code numberRegions > 1}.
 */
public static void generateHBaseDatasetDoubleOBDesc(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor tableDescriptor = new HTableDescriptor(tableName);
  tableDescriptor.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(tableDescriptor, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions - 1));
  } else {
    admin.createTable(tableDescriptor);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  for (double value = 0.5; value <= 100.00; value += 0.75) {
    byte[] rowKey = new byte[9];
    org.apache.hadoop.hbase.util.PositionedByteRange range =
        new org.apache.hadoop.hbase.util.SimplePositionedByteRange(rowKey, 0, 9);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeFloat64(range, value,
        org.apache.hadoop.hbase.util.Order.DESCENDING);
    Put put = new Put(rowKey);
    // NOTE(review): "%03f" looks like a typo for "%.3f"; kept for data parity.
    put.add(FAMILY_F, COLUMN_C, String.format("value %03f", value).getBytes());
    table.put(put);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
 
开发者ID:skhalifa,项目名称:QDrill,代码行数:34,代码来源:TestTableGenerator.java

示例10: generateHBaseDatasetFloatOBDesc

import org.apache.hadoop.hbase.client.HBaseAdmin; //导入方法依赖的package包/类
/**
 * (Re)creates {@code tableName} and loads floats 0.5, 1.25, ... up to 100.0
 * (step 0.75), each encoded as a DESCENDING OrderedBytes float32 row key,
 * then flushes the table. Pre-splits when {@code numberRegions > 1}.
 */
public static void generateHBaseDatasetFloatOBDesc(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor tableDescriptor = new HTableDescriptor(tableName);
  tableDescriptor.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(tableDescriptor, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions - 1));
  } else {
    admin.createTable(tableDescriptor);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  for (float value = (float) 0.5; value <= 100.00; value += 0.75) {
    byte[] rowKey = new byte[5];
    org.apache.hadoop.hbase.util.PositionedByteRange range =
        new org.apache.hadoop.hbase.util.SimplePositionedByteRange(rowKey, 0, 5);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeFloat32(range, value,
        org.apache.hadoop.hbase.util.Order.DESCENDING);
    Put put = new Put(rowKey);
    // NOTE(review): "%03f" looks like a typo for "%.3f"; kept for data parity.
    put.add(FAMILY_F, COLUMN_C, String.format("value %03f", value).getBytes());
    table.put(put);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
 
开发者ID:skhalifa,项目名称:QDrill,代码行数:34,代码来源:TestTableGenerator.java

示例11: generateHBaseDatasetBigIntOBDesc

import org.apache.hadoop.hbase.client.HBaseAdmin; //导入方法依赖的package包/类
/**
 * (Re)creates {@code tableName} and loads 101 consecutive longs starting at
 * a fixed epoch-millis base, each encoded as a DESCENDING OrderedBytes
 * int64 row key, then flushes the table. Pre-splits when
 * {@code numberRegions > 1}. (Also normalizes the original's broken
 * indentation.)
 */
public static void generateHBaseDatasetBigIntOBDesc(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor tableDescriptor = new HTableDescriptor(tableName);
  tableDescriptor.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(tableDescriptor, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions - 1));
  } else {
    admin.createTable(tableDescriptor);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  long startTime = (long) 1438034423 * 1000;
  for (long value = startTime; value <= startTime + 100; value++) {
    byte[] rowKey = new byte[9];
    org.apache.hadoop.hbase.util.PositionedByteRange range =
        new org.apache.hadoop.hbase.util.SimplePositionedByteRange(rowKey, 0, 9);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeInt64(range, value,
        org.apache.hadoop.hbase.util.Order.DESCENDING);
    Put put = new Put(rowKey);
    put.add(FAMILY_F, COLUMN_C, String.format("value %d", value).getBytes());
    table.put(put);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
 
开发者ID:skhalifa,项目名称:QDrill,代码行数:34,代码来源:TestTableGenerator.java

示例12: generateHBaseDatasetIntOBDesc

import org.apache.hadoop.hbase.client.HBaseAdmin; //导入方法依赖的package包/类
/**
 * (Re)creates {@code tableName} and loads the ints -49..100, each encoded
 * as a DESCENDING OrderedBytes int32 row key, then flushes the table.
 * Pre-splits when {@code numberRegions > 1}.
 */
public static void generateHBaseDatasetIntOBDesc(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor tableDescriptor = new HTableDescriptor(tableName);
  tableDescriptor.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(tableDescriptor, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions - 1));
  } else {
    admin.createTable(tableDescriptor);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  for (int value = -49; value <= 100; value++) {
    byte[] rowKey = new byte[5];
    org.apache.hadoop.hbase.util.PositionedByteRange range =
        new org.apache.hadoop.hbase.util.SimplePositionedByteRange(rowKey, 0, 5);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeInt32(range, value,
        org.apache.hadoop.hbase.util.Order.DESCENDING);
    Put put = new Put(rowKey);
    put.add(FAMILY_F, COLUMN_C, String.format("value %d", value).getBytes());
    table.put(put);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
 
开发者ID:skhalifa,项目名称:QDrill,代码行数:34,代码来源:TestTableGenerator.java

示例13: testBulkLoad

import org.apache.hadoop.hbase.client.HBaseAdmin; //导入方法依赖的package包/类
/**
 * Verifies that a scanner sees the correct newest version of a cell after a
 * bulk load followed by a regular put and a flush. The put uses the same
 * timestamp {@code l} as the bulk-loaded file, so correct visibility depends
 * on bulk-load sequence-number assignment
 * ("hbase.mapreduce.bulkload.assign.sequenceNumbers").
 */
@Test
public void testBulkLoad() throws Exception {
  TableName tableName = TableName.valueOf("testBulkLoad");
  // Single timestamp reused for the HFile contents and the later put.
  long l = System.currentTimeMillis();
  HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
  createTable(admin, tableName);
  Scan scan = createScan();
  final HTable table = init(admin, l, scan, tableName);
  // use bulkload
  final Path hfilePath = writeToHFile(l, "/temp/testBulkLoad/", "/temp/testBulkLoad/col/file",
    false);
  Configuration conf = TEST_UTIL.getConfiguration();
  // Ensure bulk-loaded files get sequence numbers so later same-timestamp
  // puts can shadow the bulk-loaded cells.
  conf.setBoolean("hbase.mapreduce.bulkload.assign.sequenceNumbers", true);
  final LoadIncrementalHFiles bulkload = new LoadIncrementalHFiles(conf);
  bulkload.doBulkLoad(hfilePath, table);
  ResultScanner scanner = table.getScanner(scan);
  Result result = scanner.next();
  // After the bulk load, "version2" should be the visible value.
  result = scanAfterBulkLoad(scanner, result, "version2");
  // Overwrite row1 with "version3" at the SAME timestamp as the bulk load.
  Put put0 = new Put(Bytes.toBytes("row1"));
  put0.add(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("col"), Bytes.toBytes("q"), l, Bytes
      .toBytes("version3")));
  table.put(put0);
  admin.flush(tableName);
  // Re-scan: the flushed put must win over the bulk-loaded cell.
  scanner = table.getScanner(scan);
  result = scanner.next();
  while (result != null) {
    List<KeyValue> kvs = result.getColumn(Bytes.toBytes("col"), Bytes.toBytes("q"));
    for (KeyValue _kv : kvs) {
      if (Bytes.toString(_kv.getRow()).equals("row1")) {
        System.out.println(Bytes.toString(_kv.getRow()));
        System.out.println(Bytes.toString(_kv.getQualifier()));
        System.out.println(Bytes.toString(_kv.getValue()));
        Assert.assertEquals("version3", Bytes.toString(_kv.getValue()));
      }
    }
    result = scanner.next();
  }
  scanner.close();
  table.close();
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:41,代码来源:TestScannerWithBulkload.java

示例14: prepareData

import org.apache.hadoop.hbase.client.HBaseAdmin; //导入方法依赖的package包/类
/**
 * (Re)creates {@code tableName} configured with FIFO compaction, split
 * disabled, and a 1-second TTL on the column family, then loads ten flushed
 * batches of ten 128 KB rows. The injected environment edge is advanced by
 * just over the TTL after each flush, so every earlier store file is fully
 * expired relative to the next batch — the setup FIFO compaction tests rely
 * on.
 *
 * @return the store backing {@code tableName}
 * @throws IOException if table admin calls or puts fail
 */
private Store prepareData() throws IOException {
  HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.setConfiguration(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY, 
    FIFOCompactionPolicy.class.getName());
  desc.setConfiguration(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, 
    DisabledRegionSplitPolicy.class.getName());
  HColumnDescriptor colDesc = new HColumnDescriptor(family);
  colDesc.setTimeToLive(1); // 1 sec
  desc.addFamily(colDesc);

  admin.createTable(desc);
  Table table = TEST_UTIL.getConnection().getTable(tableName);
  Random rand = new Random();
  // Test clock injected via EnvironmentEdgeManager; advanced manually below.
  TimeOffsetEnvironmentEdge edge =
      (TimeOffsetEnvironmentEdge) EnvironmentEdgeManager.getDelegate();
  for (int i = 0; i < 10; i++) {
    for (int j = 0; j < 10; j++) {
      byte[] value = new byte[128 * 1024];
      rand.nextBytes(value);
      table.put(new Put(Bytes.toBytes(i * 10 + j)).addColumn(family, qualifier, value));
    }
    admin.flush(tableName);
    // 1001 ms > the 1 s TTL, so the batch just flushed is already expired
    // from the next batch's point of view.
    edge.increment(1001);
  }
  return getStoreWithName(tableName);
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:32,代码来源:TestFIFOCompactionPolicy.java

示例15: flushAndBlockUntilDone

import org.apache.hadoop.hbase.client.HBaseAdmin; //导入方法依赖的package包/类
/**
 * Triggers a flush of the given region on {@code rs} and blocks until the
 * region server's flush queue has drained.
 *
 * @param admin      admin used to request the flush
 * @param rs         region server hosting the region
 * @param regionName encoded name of the region to flush
 */
public static void flushAndBlockUntilDone(HBaseAdmin admin, HRegionServer rs, byte[] regionName)
    throws IOException, InterruptedException {
  log("flushing region: " + Bytes.toStringBinary(regionName));
  admin.flush(regionName);
  log("blocking until flush is complete: " + Bytes.toStringBinary(regionName));
  // Give the flush a moment to get queued before polling.
  Threads.sleepWithoutInterrupt(500);
  for (;;) {
    if (rs.cacheFlusher.getFlushQueueSize() <= 0) {
      break;
    }
    Threads.sleep(50);
  }
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:11,代码来源:TestEndToEndSplitTransaction.java


注:本文中的org.apache.hadoop.hbase.client.HBaseAdmin.flush方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。