

Java EditLogOutputStream.close Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream.close. If you are wondering what exactly EditLogOutputStream.close does, how to call it, or what it looks like in real code, the hand-picked method examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream.


The following presents 15 code examples of the EditLogOutputStream.close method, sorted by popularity by default.
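Before the individual examples, here is a minimal sketch of the write-then-close pattern most of them exercise: start a log segment on a JournalManager, stamp and write a batch of transactions, flush, close the stream, and finalize the segment. The helper name writeAndCloseSegment, the jm parameter, and the way the FSEditLogOp instances are supplied are illustrative assumptions, not code from any of the projects below; the two-argument startLogSegment overload matches the newer examples, while older branches (Examples 7-11) pass only the transaction ID.

import java.io.IOException;
import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
import org.apache.hadoop.hdfs.server.namenode.JournalManager;
import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;

// Hypothetical helper (not from the examples below): writes the edits in
// `ops` as txns [firstTxId, firstTxId + ops.length - 1] into a fresh
// segment, closes the stream, then finalizes the segment.
static void writeAndCloseSegment(JournalManager jm, long firstTxId,
    FSEditLogOp[] ops) throws IOException {
  EditLogOutputStream out = jm.startLogSegment(firstTxId,
      NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  try {
    for (int i = 0; i < ops.length; i++) {
      ops[i].setTransactionId(firstTxId + i); // stamp each edit with its txid
      out.write(ops[i]);
    }
    out.setReadyToFlush();   // stage the buffered edits
    out.flush();             // push them to the journal
  } finally {
    out.close();             // the method this page is about
  }
  // Finalize only after the stream is closed; if writing fails, the segment
  // stays in-progress and must be recovered later.
  jm.finalizeLogSegment(firstTxId, firstTxId + ops.length - 1);
}

Note that Example 14 shows the failure path: the writer calls abort() and then close() after a simulated crash, leaving the segment in-progress until recoverUnfinalizedSegments() finalizes it.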

Example 1: writeSegmentUntilCrash

import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream; // import the package/class this method depends on
private long writeSegmentUntilCrash(MiniJournalCluster cluster,
    QuorumJournalManager qjm, long txid, int numTxns, Holder<Throwable> thrown) {
  
  long firstTxId = txid;
  long lastAcked = txid - 1;
  try {
    EditLogOutputStream stm = qjm.startLogSegment(txid,
        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
    
    for (int i = 0; i < numTxns; i++) {
      QJMTestUtil.writeTxns(stm, txid++, 1);
      lastAcked++;
    }
    
    stm.close();
    qjm.finalizeLogSegment(firstTxId, lastAcked);
  } catch (Throwable t) {
    thrown.held = t;
  }
  return lastAcked;
}
 
Developer: naver, Project: hadoop, Lines of code: 22, Source file: TestQJMWithFaults.java

Example 2: writeSegment

import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream; // import the package/class this method depends on
public static EditLogOutputStream writeSegment(MiniJournalCluster cluster,
    QuorumJournalManager qjm, long startTxId, int numTxns,
    boolean finalize) throws IOException {
  EditLogOutputStream stm = qjm.startLogSegment(startTxId,
      NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  // Should create in-progress
  assertExistsInQuorum(cluster,
      NNStorage.getInProgressEditsFileName(startTxId));
  
  writeTxns(stm, startTxId, numTxns);
  if (finalize) {
    stm.close();
    qjm.finalizeLogSegment(startTxId, startTxId + numTxns - 1);
    return null;
  } else {
    return stm;
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 19, Source file: QJMTestUtil.java

Example 3: testSimpleWrite

import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream; // import the package/class this method depends on
@Test
public void testSimpleWrite() throws Exception {
  NamespaceInfo nsi = newNSInfo();
  BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
      BKJMUtil.createJournalURI("/hdfsjournal-simplewrite"), nsi);
  bkjm.format(nsi);

  EditLogOutputStream out = bkjm.startLogSegment(1,
      NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  for (long i = 1 ; i <= 100; i++) {
    FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
    op.setTransactionId(i);
    out.write(op);
  }
  out.close();
  bkjm.finalizeLogSegment(1, 100);
 
  String zkpath = bkjm.finalizedLedgerZNode(1, 100);
  
  assertNotNull(zkc.exists(zkpath, false));
  assertNull(zkc.exists(bkjm.inprogressZNode(1), false));
}
 
Developer: naver, Project: hadoop, Lines of code: 23, Source file: TestBookKeeperJournalManager.java

Example 4: testNumberOfTransactions

import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream; // import the package/class this method depends on
@Test
public void testNumberOfTransactions() throws Exception {
  NamespaceInfo nsi = newNSInfo();

  BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
      BKJMUtil.createJournalURI("/hdfsjournal-txncount"), nsi);
  bkjm.format(nsi);

  EditLogOutputStream out = bkjm.startLogSegment(1,
      NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  for (long i = 1 ; i <= 100; i++) {
    FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
    op.setTransactionId(i);
    out.write(op);
  }
  out.close();
  bkjm.finalizeLogSegment(1, 100);

  long numTrans = bkjm.getNumberOfTransactions(1, true);
  assertEquals(100, numTrans);
}
 
Developer: naver, Project: hadoop, Lines of code: 22, Source file: TestBookKeeperJournalManager.java

Example 5: testTwoWriters

import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream; // import the package/class this method depends on
@Test
public void testTwoWriters() throws Exception {
  long start = 1;
  NamespaceInfo nsi = newNSInfo();

  BookKeeperJournalManager bkjm1 = new BookKeeperJournalManager(conf,
      BKJMUtil.createJournalURI("/hdfsjournal-dualWriter"), nsi);
  bkjm1.format(nsi);

  BookKeeperJournalManager bkjm2 = new BookKeeperJournalManager(conf,
      BKJMUtil.createJournalURI("/hdfsjournal-dualWriter"), nsi);


  EditLogOutputStream out1 = bkjm1.startLogSegment(start,
      NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  try {
    bkjm2.startLogSegment(start,
      NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
    fail("Shouldn't have been able to open the second writer");
  } catch (IOException ioe) {
    LOG.info("Caught exception as expected", ioe);
  } finally {
    out1.close();
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 26, Source file: TestBookKeeperJournalManager.java

Example 6: startAndFinalizeLogSegment

import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream; // import the package/class this method depends on
private String startAndFinalizeLogSegment(BookKeeperJournalManager bkjm,
    int startTxid, int endTxid) throws IOException, KeeperException,
    InterruptedException {
  EditLogOutputStream out = bkjm.startLogSegment(startTxid,
      NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  for (long i = startTxid; i <= endTxid; i++) {
    FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
    op.setTransactionId(i);
    out.write(op);
  }
  out.close();
  // finalize the inprogress_1 log segment.
  bkjm.finalizeLogSegment(startTxid, endTxid);
  String zkpath1 = bkjm.finalizedLedgerZNode(startTxid, endTxid);
  assertNotNull(zkc.exists(zkpath1, false));
  assertNull(zkc.exists(bkjm.inprogressZNode(startTxid), false));
  return zkpath1;
}
 
Developer: naver, Project: hadoop, Lines of code: 19, Source file: TestBookKeeperJournalManager.java

Example 7: writeSegment

import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream; // import the package/class this method depends on
public static EditLogOutputStream writeSegment(MiniJournalCluster cluster,
    QuorumJournalManager qjm, long startTxId, int numTxns,
    boolean finalize) throws IOException {
  EditLogOutputStream stm = qjm.startLogSegment(startTxId);
  // Should create in-progress
  assertExistsInQuorum(cluster,
      NNStorage.getInProgressEditsFileName(startTxId));
  
  writeTxns(stm, startTxId, numTxns);
  if (finalize) {
    stm.close();
    qjm.finalizeLogSegment(startTxId, startTxId + numTxns - 1);
    return null;
  } else {
    return stm;
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines of code: 18, Source file: QJMTestUtil.java

Example 8: testTwoWriters

import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream; // import the package/class this method depends on
@Test
public void testTwoWriters() throws Exception {
  long start = 1;
  NamespaceInfo nsi = newNSInfo();

  BookKeeperJournalManager bkjm1 = new BookKeeperJournalManager(conf,
      BKJMUtil.createJournalURI("/hdfsjournal-dualWriter"), nsi);
  bkjm1.format(nsi);

  BookKeeperJournalManager bkjm2 = new BookKeeperJournalManager(conf,
      BKJMUtil.createJournalURI("/hdfsjournal-dualWriter"), nsi);


  EditLogOutputStream out1 = bkjm1.startLogSegment(start);
  try {
    bkjm2.startLogSegment(start);
    fail("Shouldn't have been able to open the second writer");
  } catch (IOException ioe) {
    LOG.info("Caught exception as expected", ioe);
  } finally {
    out1.close();
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines of code: 24, Source file: TestBookKeeperJournalManager.java

Example 9: testNumberOfTransactions

import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream; // import the package/class this method depends on
@Test
public void testNumberOfTransactions() throws Exception {
  NamespaceInfo nsi = newNSInfo();

  BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
      BKJMUtil.createJournalURI("/hdfsjournal-txncount"), nsi);
  bkjm.format(nsi);

  EditLogOutputStream out = bkjm.startLogSegment(1);
  for (long i = 1 ; i <= 100; i++) {
    FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
    op.setTransactionId(i);
    out.write(op);
  }
  out.close();
  bkjm.finalizeLogSegment(1, 100);

  long numTrans = bkjm.getNumberOfTransactions(1, true);
  assertEquals(100, numTrans);
}
 
Developer: ict-carch, Project: hadoop-plus, Lines of code: 21, Source file: TestBookKeeperJournalManager.java

Example 10: writeSegmentUntilCrash

import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream; // import the package/class this method depends on
private long writeSegmentUntilCrash(MiniJournalCluster cluster,
    QuorumJournalManager qjm, long txid, int numTxns, Holder<Throwable> thrown) {
  
  long firstTxId = txid;
  long lastAcked = txid - 1;
  try {
    EditLogOutputStream stm = qjm.startLogSegment(txid);
    
    for (int i = 0; i < numTxns; i++) {
      QJMTestUtil.writeTxns(stm, txid++, 1);
      lastAcked++;
    }
    
    stm.close();
    qjm.finalizeLogSegment(firstTxId, lastAcked);
  } catch (Throwable t) {
    thrown.held = t;
  }
  return lastAcked;
}
 
Developer: huiyi-learning, Project: hardfs, Lines of code: 21, Source file: TestQJMWithFaults.java

Example 11: testSimpleWrite

import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream; // import the package/class this method depends on
@Test
public void testSimpleWrite() throws Exception {
  NamespaceInfo nsi = newNSInfo();
  BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
      BKJMUtil.createJournalURI("/hdfsjournal-simplewrite"), nsi);
  bkjm.format(nsi);

  EditLogOutputStream out = bkjm.startLogSegment(1);
  for (long i = 1 ; i <= 100; i++) {
    FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
    op.setTransactionId(i);
    out.write(op);
  }
  out.close();
  bkjm.finalizeLogSegment(1, 100);
 
  String zkpath = bkjm.finalizedLedgerZNode(1, 100);
  
  assertNotNull(zkc.exists(zkpath, false));
  assertNull(zkc.exists(bkjm.inprogressZNode(1), false));
}
 
Developer: huiyi-learning, Project: hardfs, Lines of code: 22, Source file: TestBookKeeperJournalManager.java

Example 12: testNumberOfTransactionsWithGaps

import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream; // import the package/class this method depends on
@Test 
public void testNumberOfTransactionsWithGaps() throws Exception {
  NamespaceInfo nsi = newNSInfo();
  BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
      BKJMUtil.createJournalURI("/hdfsjournal-gaps"), nsi);
  bkjm.format(nsi);

  long txid = 1;
  for (long i = 0; i < 3; i++) {
    long start = txid;
    EditLogOutputStream out = bkjm.startLogSegment(start,
        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
    for (long j = 1 ; j <= DEFAULT_SEGMENT_SIZE; j++) {
      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
      op.setTransactionId(txid++);
      out.write(op);
    }
    out.close();
    bkjm.finalizeLogSegment(start, txid-1);
    assertNotNull(
        zkc.exists(bkjm.finalizedLedgerZNode(start, txid-1), false));
  }
  zkc.delete(bkjm.finalizedLedgerZNode(DEFAULT_SEGMENT_SIZE+1,
                                       DEFAULT_SEGMENT_SIZE*2), -1);
  
  long numTrans = bkjm.getNumberOfTransactions(1, true);
  assertEquals(DEFAULT_SEGMENT_SIZE, numTrans);
  
  try {
    numTrans = bkjm.getNumberOfTransactions(DEFAULT_SEGMENT_SIZE+1, true);
    fail("Should have thrown corruption exception by this point");
  } catch (JournalManager.CorruptionException ce) {
    // if we get here, everything is going good
  }

  numTrans = bkjm.getNumberOfTransactions((DEFAULT_SEGMENT_SIZE*2)+1, true);
  assertEquals(DEFAULT_SEGMENT_SIZE, numTrans);
}
 
Developer: naver, Project: hadoop, Lines of code: 39, Source file: TestBookKeeperJournalManager.java

Example 13: testSimpleRead

import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream; // import the package/class this method depends on
@Test
public void testSimpleRead() throws Exception {
  NamespaceInfo nsi = newNSInfo();
  BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
      BKJMUtil.createJournalURI("/hdfsjournal-simpleread"),
      nsi);
  bkjm.format(nsi);

  final long numTransactions = 10000;
  EditLogOutputStream out = bkjm.startLogSegment(1,
      NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  for (long i = 1 ; i <= numTransactions; i++) {
    FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
    op.setTransactionId(i);
    out.write(op);
  }
  out.close();
  bkjm.finalizeLogSegment(1, numTransactions);

  List<EditLogInputStream> in = new ArrayList<EditLogInputStream>();
  bkjm.selectInputStreams(in, 1, true);
  try {
    assertEquals(numTransactions, 
                 FSEditLogTestUtil.countTransactionsInStream(in.get(0)));
  } finally {
    in.get(0).close();
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 29, Source file: TestBookKeeperJournalManager.java

Example 14: testSimpleRecovery

import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream; // import the package/class this method depends on
@Test
public void testSimpleRecovery() throws Exception {
  NamespaceInfo nsi = newNSInfo();
  BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
      BKJMUtil.createJournalURI("/hdfsjournal-simplerecovery"),
      nsi);
  bkjm.format(nsi);

  EditLogOutputStream out = bkjm.startLogSegment(1,
      NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  for (long i = 1 ; i <= 100; i++) {
    FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
    op.setTransactionId(i);
    out.write(op);
  }
  out.setReadyToFlush();
  out.flush();

  out.abort();
  out.close();


  assertNull(zkc.exists(bkjm.finalizedLedgerZNode(1, 100), false));
  assertNotNull(zkc.exists(bkjm.inprogressZNode(1), false));

  bkjm.recoverUnfinalizedSegments();

  assertNotNull(zkc.exists(bkjm.finalizedLedgerZNode(1, 100), false));
  assertNull(zkc.exists(bkjm.inprogressZNode(1), false));
}
 
Developer: naver, Project: hadoop, Lines of code: 31, Source file: TestBookKeeperJournalManager.java

Example 15: testEmptyInprogressNode

import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream; // import the package/class this method depends on
/**
 * If a journal manager has an empty inprogress node, ensure that we throw an
 * error, as this should not be possible and indicates that some third party
 * has corrupted the ZooKeeper state.
 */
@Test
public void testEmptyInprogressNode() throws Exception {
  URI uri = BKJMUtil.createJournalURI("/hdfsjournal-emptyInprogress");
  NamespaceInfo nsi = newNSInfo();
  BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri,
                                                               nsi);
  bkjm.format(nsi);

  EditLogOutputStream out = bkjm.startLogSegment(1,
      NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  for (long i = 1; i <= 100; i++) {
    FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
    op.setTransactionId(i);
    out.write(op);
  }
  out.close();
  bkjm.finalizeLogSegment(1, 100);

  out = bkjm.startLogSegment(101,
      NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  out.close();
  bkjm.close();
  String inprogressZNode = bkjm.inprogressZNode(101);
  zkc.setData(inprogressZNode, new byte[0], -1);

  bkjm = new BookKeeperJournalManager(conf, uri, nsi);
  try {
    bkjm.recoverUnfinalizedSegments();
    fail("Should have failed. There should be no way of creating"
        + " an empty inprogess znode");
  } catch (IOException e) {
    // correct behaviour
    assertTrue("Exception different than expected", e.getMessage().contains(
        "Invalid/Incomplete data in znode"));
  } finally {
    bkjm.close();
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 44, Source file: TestBookKeeperJournalManager.java


Note: The org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream.close method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors. For distribution and use, follow the corresponding project's license. Do not reproduce without permission.