

Java WAL.rollWriter Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.wal.WAL.rollWriter. If you are wondering what WAL.rollWriter does, how to call it, or where to find real-world uses of it, the curated examples below should help. You can also explore other usage examples of the enclosing class, org.apache.hadoop.hbase.wal.WAL.


Seven code examples of the WAL.rollWriter method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
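Before diving into the examples, here is a minimal sketch of the call pattern they all share, assuming the HBase 1.x API used throughout this article (the WALFactory/DefaultWALProvider era). The factory id "rollWriterDemo" and the empty WAL identifier are placeholders, not taken from any real project:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;

public class RollWriterDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // The factory id becomes part of the WAL directory name on the filesystem.
    final WALFactory wals = new WALFactory(conf, null, "rollWriterDemo");
    try {
      // Obtain a WAL for a region; an empty identifier works for the default
      // provider, as Example 2 below shows.
      final WAL wal = wals.getWAL(new byte[]{});
      wal.rollWriter();     // close the current writer and start a new WAL file
      wal.rollWriter(true); // force a roll even if nothing was written since the last one
    } finally {
      wals.close();
    }
  }
}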

Example 1: testLogRolling

import org.apache.hadoop.hbase.wal.WAL; // import the package/class the method depends on
/**
 * Tests that logs are deleted
 * @throws IOException
 * @throws org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException
 */
@Test
public void testLogRolling() throws Exception {
  this.tableName = getName();
  // TODO: Why does this write data take for ever?
  startAndWriteData();
  final WAL log = server.getWAL(null);
  LOG.info("after writing there are " + DefaultWALProvider.getNumRolledLogFiles(log) +
      " log files");

  // flush all regions
  for (Region r : server.getOnlineRegionsLocalContext()) {
    r.flush(true);
  }

  // Now roll the log
  log.rollWriter();

  int count = DefaultWALProvider.getNumRolledLogFiles(log);
  LOG.info("after flushing all regions and rolling logs there are " + count + " log files");
  assertTrue(("actual count: " + count), count <= 2);
}
 
Developer: fengchen8086, Project: ditb, Lines: 27, Source: TestLogRolling.java

Example 2: testLogRollOnNothingWritten

import org.apache.hadoop.hbase.wal.WAL; // import the package/class the method depends on
/**
 * Tests that log rolling doesn't hang when no data is written.
 */
@Test(timeout=120000)
public void testLogRollOnNothingWritten() throws Exception {
  final Configuration conf = TEST_UTIL.getConfiguration();
  final WALFactory wals = new WALFactory(conf, null,
      ServerName.valueOf("test.com", 8080, 1).toString());
  final WAL newLog = wals.getWAL(new byte[]{});
  try {
    // Now roll the log before we write anything.
    newLog.rollWriter(true);
  } finally {
    wals.close();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 17, Source: TestLogRolling.java

Example 3: testLogRollAfterSplitStart

import org.apache.hadoop.hbase.wal.WAL; // import the package/class the method depends on
/**
 * Tests the case where a RegionServer enters a GC pause and
 * comes back online after the master has declared it dead and started to split its logs.
 * Log rolling after the master's split has started should fail. See HBASE-2312.
 */
@Test (timeout=300000)
public void testLogRollAfterSplitStart() throws IOException {
  LOG.info("Verify wal roll after split starts will fail.");
  String logName = "testLogRollAfterSplitStart";
  Path thisTestsDir = new Path(HBASEDIR, DefaultWALProvider.getWALDirectoryName(logName));
  final WALFactory wals = new WALFactory(conf, null, logName);

  try {
      // put some entries in a WAL
    TableName tableName =
        TableName.valueOf(this.getClass().getName());
    HRegionInfo regioninfo = new HRegionInfo(tableName,
        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
    final WAL log = wals.getWAL(regioninfo.getEncodedNameAsBytes());
    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1);

    final int total = 20;
    for (int i = 0; i < total; i++) {
      WALEdit kvs = new WALEdit();
      kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
      HTableDescriptor htd = new HTableDescriptor(tableName);
      htd.addFamily(new HColumnDescriptor("column"));
      log.append(htd, regioninfo, new WALKey(regioninfo.getEncodedNameAsBytes(), tableName,
          System.currentTimeMillis(), mvcc), kvs, true);
    }
    // Send the data to HDFS datanodes and close the HDFS writer
    log.sync();
    ((FSHLog) log).replaceWriter(((FSHLog)log).getOldPath(), null, null, null);

    /* Code taken from MasterFileSystem.getLogDirs(), which is called from
     * MasterFileSystem.splitLog() and handles RS shutdowns (as observed by the
     * splitting process).
     */
    // rename the directory so a rogue RS doesn't create more WALs
    Path rsSplitDir = thisTestsDir.suffix(DefaultWALProvider.SPLITTING_EXT);
    if (!fs.rename(thisTestsDir, rsSplitDir)) {
      throw new IOException("Failed fs.rename for log split: " + thisTestsDir);
    }
    LOG.debug("Renamed region directory: " + rsSplitDir);

    LOG.debug("Processing the old log files.");
    WALSplitter.split(HBASEDIR, rsSplitDir, OLDLOGDIR, fs, conf, wals);

    LOG.debug("Trying to roll the WAL.");
    try {
      log.rollWriter();
      Assert.fail("rollWriter() did not throw any exception.");
    } catch (IOException ioe) {
      if (ioe.getCause() instanceof FileNotFoundException) {
        LOG.info("Got the expected exception: ", ioe.getCause());
      } else {
        Assert.fail("Unexpected exception: " + ioe);
      }
    }
  } finally {
    wals.close();
    if (fs.exists(thisTestsDir)) {
      fs.delete(thisTestsDir, true);
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 66, Source: TestLogRollAbort.java

Example 4: testActionListener

import org.apache.hadoop.hbase.wal.WAL; // import the package/class the method depends on
/**
 * Add a bunch of dummy data and roll the logs every two inserts. We
 * should end up with 10 rolled files (plus the roll called in
 * the constructor). Also test adding a listener while it's running.
 */
@Test
public void testActionListener() throws Exception {
  DummyWALActionsListener observer = new DummyWALActionsListener();
  List<WALActionsListener> list = new ArrayList<WALActionsListener>();
  list.add(observer);
  final WALFactory wals = new WALFactory(conf, list, "testActionListener");
  DummyWALActionsListener laterobserver = new DummyWALActionsListener();
  HRegionInfo hri = new HRegionInfo(TableName.valueOf(SOME_BYTES),
      SOME_BYTES, SOME_BYTES, false);
  final WAL wal = wals.getWAL(hri.getEncodedNameAsBytes());

  for (int i = 0; i < 20; i++) {
    byte[] b = Bytes.toBytes(i + "");
    KeyValue kv = new KeyValue(b, b, b);
    WALEdit edit = new WALEdit();
    edit.add(kv);
    HTableDescriptor htd = new HTableDescriptor();
    htd.addFamily(new HColumnDescriptor(b));

    final long txid = wal.append(htd, hri, new WALKey(hri.getEncodedNameAsBytes(),
        TableName.valueOf(b), 0), edit, true);
    wal.sync(txid);
    if (i == 10) {
      wal.registerWALActionsListener(laterobserver);
    }
    if (i % 2 == 0) {
      wal.rollWriter();
    }
  }

  wal.close();

  assertEquals(11, observer.preLogRollCounter);
  assertEquals(11, observer.postLogRollCounter);
  assertEquals(5, laterobserver.preLogRollCounter);
  assertEquals(5, laterobserver.postLogRollCounter);
  assertEquals(1, observer.closedCount);
}
 
Developer: fengchen8086, Project: ditb, Lines: 44, Source: TestWALActionsListener.java

Example 5: testLogRoll

import org.apache.hadoop.hbase.wal.WAL; // import the package/class the method depends on
@Test
public void testLogRoll() throws Exception {
  long baseline = 1000;
  long time = baseline;
  MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
  KeyValue kv = new KeyValue(r1, f1, r1);
  WALEdit edit = new WALEdit();
  edit.add(kv);

  List<WALActionsListener> listeners = new ArrayList<WALActionsListener>();
  listeners.add(replication);
  final WALFactory wals = new WALFactory(utility.getConfiguration(), listeners,
      URLEncoder.encode("regionserver:60020", "UTF8"));
  final WAL wal = wals.getWAL(hri.getEncodedNameAsBytes());
  manager.init();
  HTableDescriptor htd = new HTableDescriptor();
  htd.addFamily(new HColumnDescriptor(f1));
  // Testing normal log rolling every 20 appends
  for (long i = 1; i < 101; i++) {
    if (i > 1 && i % 20 == 0) {
      wal.rollWriter();
    }
    LOG.info(i);
    final long txid = wal.append(htd,
        hri,
        new WALKey(hri.getEncodedNameAsBytes(), test, System.currentTimeMillis(), mvcc),
        edit,
        true);
    wal.sync(txid);
  }

  // Simulate a rapid insert that's followed
  // by a report that's still not totally complete (missing last one)
  LOG.info(baseline + " and " + time);
  baseline += 101;
  time = baseline;
  LOG.info(baseline + " and " + time);

  for (int i = 0; i < 3; i++) {
    wal.append(htd, hri,
        new WALKey(hri.getEncodedNameAsBytes(), test, System.currentTimeMillis(), mvcc),
        edit,
        true);
  }
  wal.sync();

  int logNumber = 0;
  for (Map.Entry<String, SortedSet<String>> entry : manager.getWALs().get(slaveId).entrySet()) {
    logNumber += entry.getValue().size();
  }
  assertEquals(6, logNumber);

  wal.rollWriter();

  manager.logPositionAndCleanOldLogs(manager.getSources().get(0).getCurrentPath(),
      "1", 0, false, false);

  wal.append(htd, hri,
      new WALKey(hri.getEncodedNameAsBytes(), test, System.currentTimeMillis(), mvcc),
      edit,
      true);
  wal.sync();

  assertEquals(1, manager.getWALs().size());


  // TODO Need a case with only 2 WALs and we only want to delete the first one
}
 
Developer: fengchen8086, Project: ditb, Lines: 69, Source: TestReplicationSourceManager.java

Example 6: testWALPlayer

import org.apache.hadoop.hbase.wal.WAL; // import the package/class the method depends on
/**
 * Simple end-to-end test
 * @throws Exception
 */
@Test
public void testWALPlayer() throws Exception {
  final TableName TABLENAME1 = TableName.valueOf("testWALPlayer1");
  final TableName TABLENAME2 = TableName.valueOf("testWALPlayer2");
  final byte[] FAMILY = Bytes.toBytes("family");
  final byte[] COLUMN1 = Bytes.toBytes("c1");
  final byte[] COLUMN2 = Bytes.toBytes("c2");
  final byte[] ROW = Bytes.toBytes("row");
  Table t1 = TEST_UTIL.createTable(TABLENAME1, FAMILY);
  Table t2 = TEST_UTIL.createTable(TABLENAME2, FAMILY);

  // put a row into the first table
  Put p = new Put(ROW);
  p.add(FAMILY, COLUMN1, COLUMN1);
  p.add(FAMILY, COLUMN2, COLUMN2);
  t1.put(p);
  // delete one column
  Delete d = new Delete(ROW);
  d.deleteColumns(FAMILY, COLUMN1);
  t1.delete(d);

  // replay the WAL, map table 1 to table 2
  WAL log = cluster.getRegionServer(0).getWAL(null);
  log.rollWriter();
  String walInputDir = new Path(cluster.getMaster().getMasterFileSystem()
      .getRootDir(), HConstants.HREGION_LOGDIR_NAME).toString();

  Configuration configuration = TEST_UTIL.getConfiguration();
  WALPlayer player = new WALPlayer(configuration);
  String optionName = "_test_.name";
  configuration.set(optionName, "1000");
  player.setupTime(configuration, optionName);
  assertEquals(1000, configuration.getLong(optionName, 0));
  assertEquals(0, player.run(new String[] {walInputDir, TABLENAME1.getNameAsString(),
      TABLENAME2.getNameAsString() }));

  
  // verify the WAL was replayed into table 2
  Get g = new Get(ROW);
  Result r = t2.get(g);
  assertEquals(1, r.size());
  assertTrue(CellUtil.matchingQualifier(r.rawCells()[0], COLUMN2));
}
 
Developer: fengchen8086, Project: ditb, Lines: 48, Source: TestWALPlayer.java

Example 7: testCompactionRecordDoesntBlockRolling

import org.apache.hadoop.hbase.wal.WAL; // import the package/class the method depends on
/**
 * Tests that logs are deleted when some region has a compaction
 * record in WAL and no other records. See HBASE-8597.
 */
@Test
public void testCompactionRecordDoesntBlockRolling() throws Exception {
  Table table = null;
  Table table2 = null;

  // When the hbase:meta table can be opened, the region servers are running
  Table t = new HTable(TEST_UTIL.getConfiguration(), TableName.META_TABLE_NAME);
  try {
    table = createTestTable(getName());
    table2 = createTestTable(getName() + "1");

    server = TEST_UTIL.getRSForFirstRegionInTable(table.getName());
    final WAL log = server.getWAL(null);
    Region region = server.getOnlineRegions(table2.getName()).get(0);
    Store s = region.getStore(HConstants.CATALOG_FAMILY);

    // have to flush the namespace table to ensure it doesn't affect WAL tests
    admin.flush(TableName.NAMESPACE_TABLE_NAME);

    // Put some stuff into table2, to make sure we have some files to compact.
    for (int i = 1; i <= 2; ++i) {
      doPut(table2, i);
      admin.flush(table2.getName());
    }
    doPut(table2, 3); // don't flush yet, or compaction might trigger before we roll WAL
    assertEquals("Should have no WAL after initial writes", 0,
        DefaultWALProvider.getNumRolledLogFiles(log));
    assertEquals(2, s.getStorefilesCount());

    // Roll the log and compact table2, to have compaction record in the 2nd WAL.
    log.rollWriter();
    assertEquals("Should have WAL; one table is not flushed", 1,
        DefaultWALProvider.getNumRolledLogFiles(log));
    admin.flush(table2.getName());
    region.compact(false);
    // Wait for compaction in case if flush triggered it before us.
    Assert.assertNotNull(s);
    for (int waitTime = 3000; s.getStorefilesCount() > 1 && waitTime > 0; waitTime -= 200) {
      Threads.sleepWithoutInterrupt(200);
    }
    assertEquals("Compaction didn't happen", 1, s.getStorefilesCount());

    // Write some value to the table so the WAL cannot be deleted until table is flushed.
    doPut(table, 0); // Now 2nd WAL will have compaction record for table2 and put for table.
    log.rollWriter(); // 1st WAL deleted, 2nd not deleted yet.
    assertEquals("Should have WAL; one table is not flushed", 1,
        DefaultWALProvider.getNumRolledLogFiles(log));

    // Flush table to make latest WAL obsolete; write another record, and roll again.
    admin.flush(table.getName());
    doPut(table, 1);
    log.rollWriter(); // Now 2nd WAL is deleted and 3rd is added.
    assertEquals("Should have 1 WALs at the end", 1,
        DefaultWALProvider.getNumRolledLogFiles(log));
  } finally {
    if (t != null) t.close();
    if (table != null) table.close();
    if (table2 != null) table2.close();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 65, Source: TestLogRolling.java
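One detail the examples above mostly ignore is the return value of rollWriter: in this HBase 1.x API it returns the encoded names of regions whose unflushed edits are still pinning old WAL files (or null if there are none), which is how a region server's log roller decides what to flush so that old WALs can be archived. A minimal sketch of that pattern, under the same API assumptions as the demo at the top of this article:

import java.io.IOException;
import org.apache.hadoop.hbase.wal.WAL;

public class RollAndFlushSketch {
  /**
   * Rolls the WAL and reports how many regions must be flushed before
   * the old WAL files can be archived and deleted.
   */
  static int rollAndCountPinningRegions(WAL wal) throws IOException {
    // rollWriter(true) forces a roll; the return value lists the encoded names of
    // regions whose unflushed edits keep old WAL files alive, or null if none do.
    byte[][] regionsToFlush = wal.rollWriter(true);
    return regionsToFlush == null ? 0 : regionsToFlush.length;
  }
}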


Note: The org.apache.hadoop.hbase.wal.WAL.rollWriter method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; the copyright of the source code belongs to the original authors. Please consult the corresponding project's license before distributing or using the code, and do not reproduce this article without permission.