

Java HTable.flushCommits Method Code Examples

This page collects typical usage examples of the Java method org.apache.hadoop.hbase.client.HTable.flushCommits, gathered from open-source projects. If you are unsure what HTable.flushCommits does or how to use it, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.client.HTable.


The following shows 15 code examples of the HTable.flushCommits method, ordered by popularity.
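Before the examples, a quick note on what flushCommits does: in the pre-2.0 HBase client API, an HTable can buffer Puts on the client side (with autoflush disabled), and flushCommits() pushes the buffered mutations out to the region servers; in HBase 2.x this API was removed in favor of BufferedMutator. The minimal sketch below shows the typical pattern; the table name "demo_table", family "cf", and the row/value contents are placeholders for illustration, not taken from the examples on this page.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public static void bufferedPutsSketch(Configuration conf) throws IOException {
    // assumes conf points at a running cluster and "demo_table" already exists with family "cf"
    HTable table = new HTable(conf, "demo_table");
    try {
        table.setAutoFlush(false);    // buffer puts client-side instead of sending one RPC per put
        for (int i = 0; i < 1000; i++) {
            Put p = new Put(Bytes.toBytes("row-" + i));
            p.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v-" + i));
            table.put(p);             // queued in the client-side write buffer
        }
        table.flushCommits();         // send all buffered mutations to the region servers
    } finally {
        table.close();                // close() also flushes any remaining buffered puts
    }
}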

Example 1: putLob

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
/**
 * Upload an object into a LOB (large object) column
 * @param tableName Hyperbase table name
 * @param row rowkey, as a String (converted to bytes)
 * @param filename file name
 * @param fileData file contents
 */
public void putLob(String tableName, String row, String filename, byte[] fileData){
    byte[] rowkey = Bytes.toBytes(row);
    HTable htable = null;
    try {
        htable = new HTable(conf, tableName);
        Put put = new Put(rowkey);
        put.add(Bytes.toBytes(family1), Bytes.toBytes(f1_q1), Bytes.toBytes(filename));
        put.add(Bytes.toBytes(family2), Bytes.toBytes(f2_q1), fileData);
        htable.put(put);
        htable.flushCommits();
    } catch (IOException e1) {
        e1.printStackTrace();
    } finally {
        // always close the table, even when the put fails, to avoid leaking resources
        if (htable != null) {
            try {
                htable.close();
            } catch (IOException ignored) {
                // best-effort close
            }
        }
    }
}
 
Author: Transwarp-DE, Project: Transwarp-Sample-Code, Lines: 23, Source: LobUtil.java

Example 2: setupTableWithRegionReplica

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
/**
 * Set up a clean table with the given region replica count.
 *
 * Sets the field tbl, which must be closed after the test.
 *
 * @param tablename name of the table to create
 * @param replicaCount number of region replicas
 * @throws Exception
 */
void setupTableWithRegionReplica(TableName tablename, int replicaCount) throws Exception {
  HTableDescriptor desc = new HTableDescriptor(tablename);
  desc.setRegionReplication(replicaCount);
  HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toString(FAM));
  desc.addFamily(hcd); // If a table has no CF's it doesn't get checked
  createTable(TEST_UTIL, desc, SPLITS);

  tbl = (HTable) connection.getTable(tablename, tableExecutorService);
  List<Put> puts = new ArrayList<Put>();
  for (byte[] row : ROWKEYS) {
    Put p = new Put(row);
    p.add(FAM, Bytes.toBytes("val"), row);
    puts.add(p);
  }
  tbl.put(puts);
  tbl.flushCommits();
}
 
Author: fengchen8086, Project: ditb, Lines: 27, Source: TestHBaseFsck.java

Example 3: generateHBaseDatasetCompositeKeyDate

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
public static void generateHBaseDatasetCompositeKeyDate(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
  } else {
    admin.createTable(desc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  Date startDate = new Date(1408924800000L);
  long startTime  = startDate.getTime();
  long MILLISECONDS_IN_A_DAY  = (long)1000 * 60 * 60 * 24;
  long MILLISECONDS_IN_A_YEAR = MILLISECONDS_IN_A_DAY * 365;
  long endTime    = startTime + MILLISECONDS_IN_A_YEAR;
  long interval   = MILLISECONDS_IN_A_DAY / 3;

  for (long ts = startTime, counter = 0; ts < endTime; ts += interval, counter++) {
    byte[] rowKey = ByteBuffer.allocate(16).putLong(ts).array();

    for(int i = 0; i < 8; ++i) {
      rowKey[8 + i] = (byte)(counter >> (56 - (i * 8)));
    }

    Put p = new Put(rowKey);
    p.add(FAMILY_F, COLUMN_C, "dummy".getBytes());
    table.put(p);
  }

  table.flushCommits();
  table.close();
}
 
Author: skhalifa, Project: QDrill, Lines: 40, Source: TestTableGenerator.java

Example 4: generateHBaseDatasetCompositeKeyInt

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
public static void generateHBaseDatasetCompositeKeyInt(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
  } else {
    admin.createTable(desc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  int startVal = 0;
  int stopVal = 1000;
  int interval = 47;
  long counter = 0;
  for (int i = startVal; i < stopVal; i += interval, counter++) {
    byte[] rowKey = ByteBuffer.allocate(12).putInt(i).array();

    for(int j = 0; j < 8; ++j) {
      rowKey[4 + j] = (byte)(counter >> (56 - (j * 8)));
    }

    Put p = new Put(rowKey);
    p.add(FAMILY_F, COLUMN_C, "dummy".getBytes());
    table.put(p);
  }

  table.flushCommits();
  table.close();
}
 
Author: skhalifa, Project: QDrill, Lines: 37, Source: TestTableGenerator.java

Example 5: generateHBaseDatasetDoubleOB

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
public static void generateHBaseDatasetDoubleOB(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
  } else {
    admin.createTable(desc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  for (double i = 0.5; i <= 100.00; i += 0.75) {
    byte[] bytes = new byte[9];
    org.apache.hadoop.hbase.util.PositionedByteRange br =
            new org.apache.hadoop.hbase.util.SimplePositionedByteRange(bytes, 0, 9);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeFloat64(br, i,
            org.apache.hadoop.hbase.util.Order.ASCENDING);
    Put p = new Put(bytes);
    p.add(FAMILY_F, COLUMN_C, String.format("value %03f", i).getBytes());
    table.put(p);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
 
Author: skhalifa, Project: QDrill, Lines: 34, Source: TestTableGenerator.java

Example 6: generateHBaseDatasetFloatOB

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
public static void generateHBaseDatasetFloatOB(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
  } else {
    admin.createTable(desc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  for (float i = (float)0.5; i <= 100.00; i += 0.75) {
    byte[] bytes = new byte[5];
    org.apache.hadoop.hbase.util.PositionedByteRange br =
            new org.apache.hadoop.hbase.util.SimplePositionedByteRange(bytes, 0, 5);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeFloat32(br, i,
            org.apache.hadoop.hbase.util.Order.ASCENDING);
    Put p = new Put(bytes);
    p.add(FAMILY_F, COLUMN_C, String.format("value %03f", i).getBytes());
    table.put(p);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
 
Author: skhalifa, Project: QDrill, Lines: 34, Source: TestTableGenerator.java

Example 7: generateHBaseDatasetBigIntOB

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
public static void generateHBaseDatasetBigIntOB(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
  } else {
    admin.createTable(desc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  long startTime = (long)1438034423 * 1000;
  for (long i = startTime; i <= startTime + 100; i++) {
    byte[] bytes = new byte[9];
    org.apache.hadoop.hbase.util.PositionedByteRange br =
            new org.apache.hadoop.hbase.util.SimplePositionedByteRange(bytes, 0, 9);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeInt64(br, i,
            org.apache.hadoop.hbase.util.Order.ASCENDING);
    Put p = new Put(bytes);
    p.add(FAMILY_F, COLUMN_C, String.format("value %d", i).getBytes());
    table.put(p);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
 
Author: skhalifa, Project: QDrill, Lines: 34, Source: TestTableGenerator.java

Example 8: generateHBaseDatasetIntOB

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
public static void generateHBaseDatasetIntOB(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
  } else {
    admin.createTable(desc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  for (int i = -49; i <= 100; i++) {
    byte[] bytes = new byte[5];
    org.apache.hadoop.hbase.util.PositionedByteRange br =
            new org.apache.hadoop.hbase.util.SimplePositionedByteRange(bytes, 0, 5);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeInt32(br, i,
            org.apache.hadoop.hbase.util.Order.ASCENDING);
    Put p = new Put(bytes);
    p.add(FAMILY_F, COLUMN_C, String.format("value %d", i).getBytes());
    table.put(p);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
 
Author: skhalifa, Project: QDrill, Lines: 34, Source: TestTableGenerator.java

Example 9: generateHBaseDatasetDoubleOBDesc

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
public static void generateHBaseDatasetDoubleOBDesc(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
  } else {
    admin.createTable(desc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  for (double i = 0.5; i <= 100.00; i += 0.75) {
    byte[] bytes = new byte[9];
    org.apache.hadoop.hbase.util.PositionedByteRange br =
            new org.apache.hadoop.hbase.util.SimplePositionedByteRange(bytes, 0, 9);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeFloat64(br, i,
            org.apache.hadoop.hbase.util.Order.DESCENDING);
    Put p = new Put(bytes);
    p.add(FAMILY_F, COLUMN_C, String.format("value %03f", i).getBytes());
    table.put(p);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
 
Author: skhalifa, Project: QDrill, Lines: 34, Source: TestTableGenerator.java

Example 10: generateHBaseDatasetFloatOBDesc

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
public static void generateHBaseDatasetFloatOBDesc(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
  } else {
    admin.createTable(desc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  for (float i = (float)0.5; i <= 100.00; i += 0.75) {
    byte[] bytes = new byte[5];
    org.apache.hadoop.hbase.util.PositionedByteRange br =
            new org.apache.hadoop.hbase.util.SimplePositionedByteRange(bytes, 0, 5);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeFloat32(br, i,
            org.apache.hadoop.hbase.util.Order.DESCENDING);
    Put p = new Put(bytes);
    p.add(FAMILY_F, COLUMN_C, String.format("value %03f", i).getBytes());
    table.put(p);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
 
Author: skhalifa, Project: QDrill, Lines: 34, Source: TestTableGenerator.java

Example 11: generateHBaseDatasetBigIntOBDesc

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
public static void generateHBaseDatasetBigIntOBDesc(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
  } else {
    admin.createTable(desc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  long startTime = (long)1438034423 * 1000;
  for (long i = startTime; i <= startTime + 100; i++) {
    byte[] bytes = new byte[9];
    org.apache.hadoop.hbase.util.PositionedByteRange br =
            new org.apache.hadoop.hbase.util.SimplePositionedByteRange(bytes, 0, 9);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeInt64(br, i,
            org.apache.hadoop.hbase.util.Order.DESCENDING);
    Put p = new Put(bytes);
    p.add(FAMILY_F, COLUMN_C, String.format("value %d", i).getBytes());
    table.put(p);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
 
Author: skhalifa, Project: QDrill, Lines: 34, Source: TestTableGenerator.java

Example 12: generateHBaseDatasetIntOBDesc

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
public static void generateHBaseDatasetIntOBDesc(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
  } else {
    admin.createTable(desc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  for (int i = -49; i <= 100; i++) {
    byte[] bytes = new byte[5];
    org.apache.hadoop.hbase.util.PositionedByteRange br =
            new org.apache.hadoop.hbase.util.SimplePositionedByteRange(bytes, 0, 5);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeInt32(br, i,
            org.apache.hadoop.hbase.util.Order.DESCENDING);
    Put p = new Put(bytes);
    p.add(FAMILY_F, COLUMN_C, String.format("value %d", i).getBytes());
    table.put(p);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
 
Author: skhalifa, Project: QDrill, Lines: 34, Source: TestTableGenerator.java

Example 13: testOveralyOnOtherCluster

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
@Test
public void testOveralyOnOtherCluster() throws Exception {
  // just run HDFS
  HBaseTestingUtility util1 = new HBaseTestingUtility();
  MiniDFSCluster dfs = util1.startMiniDFSCluster(1);

  // run HBase on that HDFS
  HBaseTestingUtility util2 = new HBaseTestingUtility();
  // set the dfs
  util2.setDFSCluster(dfs, false);
  util2.startMiniCluster();

  // ensure that they point at the same place
  FileSystem fs = dfs.getFileSystem();
  FileSystem targetFs = util2.getDFSCluster().getFileSystem();
  assertFsSameUri(fs, targetFs);

  fs = FileSystem.get(util1.getConfiguration());
  targetFs = FileSystem.get(util2.getConfiguration());
  assertFsSameUri(fs, targetFs);

  Path randomFile = new Path("/"+UUID.randomUUID());
  assertTrue(targetFs.createNewFile(randomFile));
  assertTrue(fs.exists(randomFile));

  // do a simple create/write to ensure the cluster works as expected
  byte[] family = Bytes.toBytes("testfamily");
  byte[] tablename = Bytes.toBytes("testtable");
  HTable table = util2.createTable(tablename, family);
  Put p = new Put(new byte[] { 1, 2, 3 });
  p.add(family, null, new byte[] { 1 });
  table.put(p);
  table.flushCommits();

  // shutdown and make sure cleanly shutting down
  util2.shutdownMiniCluster();
  util1.shutdownMiniDFSCluster();
}
 
Author: fengchen8086, Project: ditb, Lines: 39, Source: TestHBaseOnOtherDfsCluster.java

Example 14: test

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
@Test
public void test() throws IOException, InterruptedException {
  testUtil.getHBaseAdmin().createNamespace(
    NamespaceDescriptor.create(tableName.getNamespaceAsString()).build());
  HTable table = testUtil.createTable(tableName, families);
  table.put(new Put(Bytes.toBytes("k")).add(family, Bytes.toBytes("q"), Bytes.toBytes("v")));
  table.flushCommits();
  MiniHBaseCluster cluster = testUtil.getMiniHBaseCluster();
  List<JVMClusterUtil.RegionServerThread> rsts = cluster.getRegionServerThreads();
  Region region = null;
  for (int i = 0; i < cluster.getRegionServerThreads().size(); i++) {
    HRegionServer hrs = rsts.get(i).getRegionServer();
    for (Region r : hrs.getOnlineRegions(tableName)) {
      region = r;
      break;
    }
  }
  assertNotNull(region);
  Thread.sleep(2000);
  RegionStoreSequenceIds ids =
      testUtil.getHBaseCluster().getMaster()
          .getLastSequenceId(region.getRegionInfo().getEncodedNameAsBytes());
  assertEquals(HConstants.NO_SEQNUM, ids.getLastFlushedSequenceId());
  // This will be the sequenceid just before that of the earliest edit in memstore.
  long storeSequenceId = ids.getStoreSequenceId(0).getSequenceId();
  assertTrue(storeSequenceId > 0);
  testUtil.getHBaseAdmin().flush(tableName);
  Thread.sleep(2000);
  ids =
      testUtil.getHBaseCluster().getMaster()
          .getLastSequenceId(region.getRegionInfo().getEncodedNameAsBytes());
  assertTrue(ids.getLastFlushedSequenceId() + " > " + storeSequenceId,
    ids.getLastFlushedSequenceId() > storeSequenceId);
  assertEquals(ids.getLastFlushedSequenceId(), ids.getStoreSequenceId(0).getSequenceId());
  table.close();
}
 
Author: fengchen8086, Project: ditb, Lines: 37, Source: TestGetLastFlushedSequenceId.java

Example 15: testPreWALRestoreSkip

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
@Test (timeout=300000)
public void testPreWALRestoreSkip() throws Exception {
  LOG.info(TestRegionObserverInterface.class.getName() + ".testPreWALRestoreSkip");
  TableName tableName = TableName.valueOf(SimpleRegionObserver.TABLE_SKIPPED);
  HTable table = util.createTable(tableName, new byte[][] { A, B, C });

  JVMClusterUtil.RegionServerThread rs1 = cluster.startRegionServer();
  ServerName sn2 = rs1.getRegionServer().getServerName();
  String regEN = table.getRegionLocations().firstEntry().getKey().getEncodedName();

  util.getHBaseAdmin().move(regEN.getBytes(), sn2.getServerName().getBytes());
  while (!sn2.equals(table.getRegionLocations().firstEntry().getValue())) {
    Thread.sleep(100);
  }

  Put put = new Put(ROW);
  put.add(A, A, A);
  put.add(B, B, B);
  put.add(C, C, C);
  table.put(put);
  table.flushCommits();

  cluster.killRegionServer(rs1.getRegionServer().getServerName());
  Threads.sleep(20000); // just to be sure that the kill has fully started.
  util.waitUntilAllRegionsAssigned(tableName);

  verifyMethodResult(SimpleRegionObserver.class, new String[] { "getCtPreWALRestore",
      "getCtPostWALRestore", "getCtPreWALRestoreDeprecated", "getCtPostWALRestoreDeprecated"},
      tableName,
      new Integer[] {0, 0, 0, 0});

  util.deleteTable(tableName);
  table.close();
}
 
Author: fengchen8086, Project: ditb, Lines: 35, Source: TestRegionObserverInterface.java


Note: The org.apache.hadoop.hbase.client.HTable.flushCommits examples on this page were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code fragments were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution and use are subject to each project's License. Please do not reproduce without permission.