

Java EditLogOutputStream.flush Method Code Examples

This article collects typical usage examples of the flush method of org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream in Java. If you are wondering how EditLogOutputStream.flush is used in practice and what concrete calls look like, the selected code samples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream.


The following presents 10 code examples of the EditLogOutputStream.flush method, sorted by popularity by default.
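Before the individual examples, here is a minimal usage sketch of the call sequence they all share: buffer one or more FSEditLogOp entries with write(), mark the batch with setReadyToFlush(), then call flush() to push it to the underlying journal. The wrapper class and method names below (FlushSketch, writeAndFlush) are illustrative placeholders, not part of Hadoop; in real code the stream is obtained from a JournalManager such as QuorumJournalManager or BookKeeperJournalManager, as the examples show.

import java.io.IOException;

import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;

public class FlushSketch {
  /**
   * Buffers the given edits and flushes them as one batch.
   * The stream and the ops are supplied by the caller; how they are created
   * depends on the JournalManager implementation in use.
   */
  public static void writeAndFlush(EditLogOutputStream stm, FSEditLogOp... ops)
      throws IOException {
    for (FSEditLogOp op : ops) {
      stm.write(op);        // buffer the edit in the stream
    }
    stm.setReadyToFlush();  // mark the buffered edits as a flushable batch
    stm.flush();            // push the batch to the underlying journal storage
  }
}

The tests below follow this same write / setReadyToFlush / flush pattern against different journal backends.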

Example 1: testWriteEditsOneSlow

import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream; // import the package/class the method depends on
@Test
public void testWriteEditsOneSlow() throws Exception {
  EditLogOutputStream stm = createLogSegment();
  writeOp(stm, 1);
  stm.setReadyToFlush();
  
  // Make the first two logs respond immediately
  futureReturns(null).when(spyLoggers.get(0)).sendEdits(
      anyLong(), eq(1L), eq(1), Mockito.<byte[]>any());
  futureReturns(null).when(spyLoggers.get(1)).sendEdits(
      anyLong(), eq(1L), eq(1), Mockito.<byte[]>any());
  
  // And the third log does not respond
  SettableFuture<Void> slowLog = SettableFuture.create();
  Mockito.doReturn(slowLog).when(spyLoggers.get(2)).sendEdits(
      anyLong(), eq(1L), eq(1), Mockito.<byte[]>any());
  // flush() needs only a quorum (two of the three loggers) to acknowledge,
  // so it completes even though the third logger never responds
  stm.flush();
  
  Mockito.verify(spyLoggers.get(0)).setCommittedTxId(1L);
}
 
Developer: naver, Project: hadoop, Lines of code: 21, Source file: TestQuorumJournalManagerUnit.java

Example 2: testWriteEditsOneSlow

import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream; // import the package/class the method depends on
@Test
public void testWriteEditsOneSlow() throws Exception {
  EditLogOutputStream stm = createLogSegment();
  writeOp(stm, 1);
  stm.setReadyToFlush();
  
  // Make the first two logs respond immediately
  futureReturns(null).when(spyLoggers.get(0)).sendEdits(
      anyLong(), eq(1L), eq(1), Mockito.<byte[]>any());
  futureReturns(null).when(spyLoggers.get(1)).sendEdits(
      anyLong(), eq(1L), eq(1), Mockito.<byte[]>any());
  
  // And the third log does not respond
  SettableFuture<Void> slowLog = SettableFuture.<Void>create();
  Mockito.doReturn(slowLog).when(spyLoggers.get(2)).sendEdits(
      anyLong(), eq(1L), eq(1), Mockito.<byte[]>any());
  stm.flush();
  
  Mockito.verify(spyLoggers.get(0)).setCommittedTxId(1L, false);
}
 
Developer: rhli, Project: hadoop-EAR, Lines of code: 21, Source file: TestQuorumJournalManagerUnit.java

Example 3: testWriteEditsOneSlow

import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream; // import the package/class the method depends on
@Test
public void testWriteEditsOneSlow() throws Exception {
  EditLogOutputStream stm = createLogSegment();
  writeOp(stm, 1);
  stm.setReadyToFlush();
  
  // Make the first two logs respond immediately
  futureReturns(null).when(spyLoggers.get(0)).sendEdits(
      anyLong(), eq(1L), eq(1), Mockito.<byte[]>any());
  futureReturns(null).when(spyLoggers.get(1)).sendEdits(
      anyLong(), eq(1L), eq(1), Mockito.<byte[]>any());
  
  // And the third log does not respond
  SettableFuture<Void> slowLog = SettableFuture.<Void>create();
  Mockito.doReturn(slowLog).when(spyLoggers.get(2)).sendEdits(
      anyLong(), eq(1L), eq(1), Mockito.<byte[]>any());
  stm.flush();
  
  Mockito.verify(spyLoggers.get(0)).setCommittedTxId(1L);
}
 
Developer: ict-carch, Project: hadoop-plus, Lines of code: 21, Source file: TestQuorumJournalManagerUnit.java

Example 4: testWriteEdits

import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream; // import the package/class the method depends on
@Test
public void testWriteEdits() throws Exception {
  EditLogOutputStream stm = createLogSegment();
  writeOp(stm, 1);
  writeOp(stm, 2);
  
  stm.setReadyToFlush();
  writeOp(stm, 3);
  
  // The flush should log txn 1-2
  futureReturns(null).when(spyLoggers.get(0)).sendEdits(
      anyLong(), eq(1L), eq(2), Mockito.<byte[]>any());
  futureReturns(null).when(spyLoggers.get(1)).sendEdits(
      anyLong(), eq(1L), eq(2), Mockito.<byte[]>any());
  futureReturns(null).when(spyLoggers.get(2)).sendEdits(
      anyLong(), eq(1L), eq(2), Mockito.<byte[]>any());
  stm.flush();

  // Another flush should now log txn #3
  stm.setReadyToFlush();
  futureReturns(null).when(spyLoggers.get(0)).sendEdits(
      anyLong(), eq(3L), eq(1), Mockito.<byte[]>any());
  futureReturns(null).when(spyLoggers.get(1)).sendEdits(
      anyLong(), eq(3L), eq(1), Mockito.<byte[]>any());
  futureReturns(null).when(spyLoggers.get(2)).sendEdits(
      anyLong(), eq(3L), eq(1), Mockito.<byte[]>any());
  stm.flush();
}
 
Developer: naver, Project: hadoop, Lines of code: 29, Source file: TestQuorumJournalManagerUnit.java

Example 5: writeTxns

import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream; // import the package/class the method depends on
public static void writeTxns(EditLogOutputStream stm, long startTxId, int numTxns)
    throws IOException {
  for (long txid = startTxId; txid < startTxId + numTxns; txid++) {
    writeOp(stm, txid);
  }
  stm.setReadyToFlush();
  stm.flush();
}
 
Developer: naver, Project: hadoop, Lines of code: 9, Source file: QJMTestUtil.java

Example 6: testSimpleRecovery

import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream; // import the package/class the method depends on
@Test
public void testSimpleRecovery() throws Exception {
  NamespaceInfo nsi = newNSInfo();
  BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
      BKJMUtil.createJournalURI("/hdfsjournal-simplerecovery"),
      nsi);
  bkjm.format(nsi);

  EditLogOutputStream out = bkjm.startLogSegment(1,
      NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  for (long i = 1; i <= 100; i++) {
    FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
    op.setTransactionId(i);
    out.write(op);
  }
  out.setReadyToFlush();
  out.flush();

  out.abort();
  out.close();


  assertNull(zkc.exists(bkjm.finalizedLedgerZNode(1, 100), false));
  assertNotNull(zkc.exists(bkjm.inprogressZNode(1), false));

  bkjm.recoverUnfinalizedSegments();

  assertNotNull(zkc.exists(bkjm.finalizedLedgerZNode(1, 100), false));
  assertNull(zkc.exists(bkjm.inprogressZNode(1), false));
}
 
Developer: naver, Project: hadoop, Lines of code: 31, Source file: TestBookKeeperJournalManager.java

Example 7: testManifest

import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream; // import the package/class the method depends on
@Test(timeout=30 * 1000)
public void testManifest() throws IOException {
  final int numSegments = 20;
  final int txnsPerSegment = 10;

  // take over as the writer
  qjm.recoverUnfinalizedSegments();
  List<FSEditLogOp> txns = new ArrayList<FSEditLogOp>();
  List<SegmentDescriptor> segments = new ArrayList<SegmentDescriptor>();

  for (int i = 0; i < numSegments; i++) {
    long startTxId = i * txnsPerSegment;
    long endTxId = startTxId + txnsPerSegment - 1;
    boolean finalize = r.nextBoolean();

    EditLogOutputStream stm = QJMTestUtil.writeRandomSegment(cluster, qjm, i
        * txnsPerSegment, (txnsPerSegment), finalize, txns);
    SegmentDescriptor sd = new SegmentDescriptor(-1, startTxId,
        finalize ? endTxId : -1, false);
    segments.add(sd);

    // validate manifest for all segments
    validateSegmentManifest(segments, cluster);

    if (!finalize) {
      stm.flush();
      stm.close();
      qjm.finalizeLogSegment(startTxId, endTxId);
      sd.endTxId = endTxId;
    }
    // revalidate after closing
    validateSegmentManifest(segments, cluster);
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines of code: 35, Source file: TestQJMRecovery.java

Example 8: writeRandomSegment

import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream; // import the package/class the method depends on
public static EditLogOutputStream writeRandomSegment(MiniJournalCluster cluster,
    QuorumJournalManager qjm, long startTxId, int numTxns,
    boolean finalize, List<FSEditLogOp> writtenTxns) throws IOException {
  EditLogOutputStream stm = qjm.startLogSegment(startTxId);
  // Should create in-progress
  assertExistsInQuorum(cluster,
      NNStorage.getInProgressEditsFileName(startTxId));
  
  List<FSEditLogOp> txns = FSEditLogTestUtil.getContiguousLogSegment(
      (int) startTxId, ((int) startTxId + numTxns - 1));
  for (FSEditLogOp op : txns) {
    stm.write(op);
    stm.setReadyToFlush();
    stm.flush();
    if (writtenTxns != null) {
      writtenTxns.add(op);
    }
  }
  
  if (finalize) {
    stm.close();
    qjm.finalizeLogSegment(startTxId, startTxId + numTxns - 1);
    return null;
  } else {
    return stm;
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines of code: 28, Source file: QJMTestUtil.java

Example 9: writeTxns

import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream; // import the package/class the method depends on
public static void writeTxns(EditLogOutputStream stm, long startTxId,
    int numTxns) throws IOException {
  for (long txid = startTxId; txid < startTxId + numTxns; txid++) {
    writeOp(stm, txid);
  }
  stm.setReadyToFlush();
  stm.flush();
}
 
Developer: rhli, Project: hadoop-EAR, Lines of code: 9, Source file: QJMTestUtil.java

Example 10: testSimpleRecovery

import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream; // import the package/class the method depends on
@Test
public void testSimpleRecovery() throws Exception {
  NamespaceInfo nsi = newNSInfo();
  BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
      BKJMUtil.createJournalURI("/hdfsjournal-simplerecovery"),
      nsi);
  bkjm.format(nsi);

  EditLogOutputStream out = bkjm.startLogSegment(1);
  for (long i = 1; i <= 100; i++) {
    FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
    op.setTransactionId(i);
    out.write(op);
  }
  out.setReadyToFlush();
  out.flush();

  out.abort();
  out.close();


  assertNull(zkc.exists(bkjm.finalizedLedgerZNode(1, 100), false));
  assertNotNull(zkc.exists(bkjm.inprogressZNode(1), false));

  bkjm.recoverUnfinalizedSegments();

  assertNotNull(zkc.exists(bkjm.finalizedLedgerZNode(1, 100), false));
  assertNull(zkc.exists(bkjm.inprogressZNode(1), false));
}
 
Developer: ict-carch, Project: hadoop-plus, Lines of code: 30, Source file: TestBookKeeperJournalManager.java


Note: the org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream.flush examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Refer to the corresponding project's License before distributing or using the code; do not republish without permission.