

Java FSDataOutputStream.hflush Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FSDataOutputStream.hflush. If you are unsure what FSDataOutputStream.hflush does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples for the enclosing class, org.apache.hadoop.fs.FSDataOutputStream.


The following presents 15 code examples of the FSDataOutputStream.hflush method, sorted by popularity by default.
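Before diving into the examples, here is a minimal, self-contained sketch of the basic call pattern (the HDFS URI hdfs://localhost:8020 and the path /tmp/hflush-demo are illustrative placeholders, not taken from the examples below). hflush() pushes the client's buffered bytes to the DataNodes in the write pipeline so that new readers can see the data while the stream stays open; hsync(), which several of the examples also use, additionally asks the DataNodes to persist the data to disk.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HflushDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder URI; point this at your own NameNode.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf);
    Path path = new Path("/tmp/hflush-demo");

    FSDataOutputStream out = fs.create(path, true);
    try {
      out.writeBytes("first record\n");
      // Make the data visible to new readers without closing the stream.
      out.hflush();
      // The stream stays open (and holds the lease), so writing can continue.
      out.writeBytes("second record\n");
    } finally {
      out.close();
    }
  }
}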

Example 1: checkAndMarkRunning

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class that the method depends on
/**
 * To make sure that no more than one instance is running in an HDFS,
 * create a file in the HDFS, write the hostname of the machine on which
 * the instance is running to the file, and do not close the file until
 * the instance exits.
 *
 * This prevents a second instance from running, because it cannot
 * create the file while the first one is running.
 *
 * This method checks whether an instance is already running and, if not,
 * marks this one as running. Note that this is an atomic operation.
 *
 * @return null if an instance is already running;
 *         otherwise, the output stream to the newly created file.
 */
private OutputStream checkAndMarkRunning() throws IOException {
  try {
    if (fs.exists(idPath)) {
      // try appending to it so that it will fail fast if another balancer is
      // running.
      IOUtils.closeStream(fs.append(idPath));
      fs.delete(idPath, true);
    }
    final FSDataOutputStream fsout = fs.create(idPath, false);
    // mark balancer idPath to be deleted during filesystem closure
    fs.deleteOnExit(idPath);
    if (write2IdFile) {
      fsout.writeBytes(InetAddress.getLocalHost().getHostName());
      fsout.hflush();
    }
    return fsout;
  } catch(RemoteException e) {
    if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){
      return null;
    } else {
      throw e;
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 40, Source: NameNodeConnector.java

Example 2: run

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class that the method depends on
@Override
public void run() {
  FSDataOutputStream out = null;
  int i = 0;
  try {
    out = fs.create(filepath);
    for(; running; i++) {
      System.out.println(getName() + " writes " + i);
      out.write(i);
      out.hflush();
      sleep(100);
    }
  }
  catch(Exception e) {
    System.out.println(getName() + " dies: e=" + e);
  }
  finally {
    System.out.println(getName() + ": i=" + i);
    IOUtils.closeStream(out);
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 22, Source: TestFileCreationClient.java

Example 3: syncSlots

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class that the method depends on
protected long syncSlots(FSDataOutputStream stream, ByteSlot[] slots, int offset, int count)
    throws IOException {
  long totalSynced = 0;
  for (int i = 0; i < count; ++i) {
    ByteSlot data = slots[offset + i];
    data.writeTo(stream);
    totalSynced += data.size();
  }

  if (useHsync) {
    stream.hsync();
  } else {
    stream.hflush();
  }
  sendPostSyncSignal();

  if (LOG.isTraceEnabled()) {
    LOG.trace("Sync slots=" + count + '/' + slots.length +
              ", flushed=" + StringUtils.humanSize(totalSynced));
  }
  return totalSynced;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 23, Source: WALProcedureStore.java

Example 4: doAnAction

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class that the method depends on
@Override
public void doAnAction() throws Exception {
  FSDataOutputStream stm = fs.create(path, true);
  try {
    AppendTestUtil.write(stm, 0, 100);
    stm.hflush();
    loopRecoverLease(fsOtherUser, path);
    AppendTestUtil.check(fs, path, 100);
  } finally {
    try {
      stm.close();
    } catch (IOException e) {
      // should expect this since we lost the lease
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 17, Source: TestPipelinesFailover.java

Example 5: testAbandonBlock

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class that the method depends on
@Test
/** Abandon a block while creating a file */
public void testAbandonBlock() throws IOException {
  String src = FILE_NAME_PREFIX + "foo";

  // Start writing a file but do not close it
  FSDataOutputStream fout = fs.create(new Path(src), true, 4096, (short)1, 512L);
  for (int i = 0; i < 1024; i++) {
    fout.write(123);
  }
  fout.hflush();
  long fileId = ((DFSOutputStream)fout.getWrappedStream()).getFileId();

  // Now abandon the last block
  DFSClient dfsclient = DFSClientAdapter.getDFSClient(fs);
  LocatedBlocks blocks =
    dfsclient.getNamenode().getBlockLocations(src, 0, Integer.MAX_VALUE);
  int orginalNumBlocks = blocks.locatedBlockCount();
  LocatedBlock b = blocks.getLastLocatedBlock();
  dfsclient.getNamenode().abandonBlock(b.getBlock(), fileId, src,
      dfsclient.clientName);
  
  // call abandonBlock again to make sure the operation is idempotent
  dfsclient.getNamenode().abandonBlock(b.getBlock(), fileId, src,
      dfsclient.clientName);

  // And close the file
  fout.close();

  // Close cluster and check the block has been abandoned after restart
  cluster.restartNameNode();
  blocks = dfsclient.getNamenode().getBlockLocations(src, 0,
      Integer.MAX_VALUE);
  Assert.assertEquals("Blocks " + b + " has not been abandoned.",
      orginalNumBlocks, blocks.locatedBlockCount() + 1);
}
 
Developer ID: naver, Project: hadoop, Lines: 37, Source: TestAbandonBlock.java

Example 6: createAndHflush

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class that the method depends on
static FSDataOutputStream createAndHflush(FileSystem fs, Path file,
    byte[] data, int length) throws IOException{
  FSDataOutputStream out = fs.create(file, false, 4096, (short)3, 1024);
  out.write(data, 0, length);
  out.hflush();
  return out;
}
 
Developer ID: naver, Project: hadoop, Lines: 8, Source: TestHAAppend.java

Example 7: testWriteToDeletedFile

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class that the method depends on
@Test(timeout=120000)
public void testWriteToDeletedFile() throws IOException {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .build();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();

  Path path = new Path("/test1");
  assertTrue(fs.mkdirs(path));

  int size = conf.getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
  byte[] data = new byte[size];

  // Create one file
  Path filePath = new Path("/test1/file");
  FSDataOutputStream fos = fs.create(filePath);

  // Delete the file
  fs.delete(filePath, false);

  // Add new block should fail since /test1/file has been deleted.
  try {
    fos.write(data, 0, data.length);
    // make sure addBlock() request gets to NN immediately
    fos.hflush();

    fail("Write should fail after delete");
  } catch (Exception e) {
    /* Ignore */
  } finally {
    cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 35, Source: TestINodeFile.java

Example 8: testHSyncBlockBoundary

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class that the method depends on
/** Test hsync on an exact block boundary */
@Test
public void testHSyncBlockBoundary() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  final FileSystem fs = cluster.getFileSystem();
  
  final Path p = new Path("/testHSyncBlockBoundary/foo");
  final int len = 1 << 16;
  final byte[] fileContents = AppendTestUtil.initBuffer(len);
  FSDataOutputStream out = fs.create(p, FsPermission.getDefault(),
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK),
      4096, (short) 1, len, null);
  // fill exactly one block (tests the SYNC_BLOCK case) and flush
  out.write(fileContents, 0, len);
  out.hflush();
  // the full block should have caused a sync
  checkSyncMetric(cluster, 1);
  out.hsync();
  // hsync at the same block boundary should not trigger another sync
  checkSyncMetric(cluster, 1);
  // write one more byte and sync again
  out.write(1);
  out.hsync();
  checkSyncMetric(cluster, 2);
  out.close();
  checkSyncMetric(cluster, 3);
  cluster.shutdown();
}
 
Developer ID: naver, Project: hadoop, Lines: 30, Source: TestHSync.java

Example 9: testRestartWithPartialBlockHflushed

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class that the method depends on
@Test
public void testRestartWithPartialBlockHflushed() throws IOException {
  final Configuration conf = new HdfsConfiguration();
  // Turn off persistent IPC, so that the DFSClient can survive NN restart
  conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
      0);
  MiniDFSCluster cluster = null;

  FSDataOutputStream stream;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    FileSystem fs = cluster.getFileSystem();
    NameNode.getAddress(conf).getPort();
    // Creating a file with 4096 blockSize to write multiple blocks
    stream = fs.create(FILE_PATH, true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
    stream.write(DATA_BEFORE_RESTART);
    stream.write((byte)1);
    stream.hflush();
    
    // explicitly do NOT close the file before restarting the NN.
    cluster.restartNameNode();
    
    // this will fail if the final block of the file is prematurely COMPLETEd
    stream.write((byte)2);
    stream.hflush();
    stream.close();
    
    assertEquals(DATA_BEFORE_RESTART.length + 2,
        fs.getFileStatus(FILE_PATH).getLen());
    
    FSDataInputStream readStream = fs.open(FILE_PATH);
    try {
      byte[] verifyBuf = new byte[DATA_BEFORE_RESTART.length + 2];
      IOUtils.readFully(readStream, verifyBuf, 0, verifyBuf.length);
      byte[] expectedBuf = new byte[DATA_BEFORE_RESTART.length + 2];
      System.arraycopy(DATA_BEFORE_RESTART, 0, expectedBuf, 0,
          DATA_BEFORE_RESTART.length);
      System.arraycopy(new byte[]{1, 2}, 0, expectedBuf,
          DATA_BEFORE_RESTART.length, 2);
      assertArrayEquals(expectedBuf, verifyBuf);
    } finally {
      IOUtils.closeStream(readStream);
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 49, Source: TestPersistBlocks.java

Example 10: testExcludedNodesForgiveness

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class that the method depends on
@Test(timeout=60000)
public void testExcludedNodesForgiveness() throws IOException {
  // Forgive nodes in under 2.5s for this test case.
  conf.setLong(
      DFSConfigKeys.DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL,
      2500);
  // We'll be using a 512 bytes block size just for tests
  // so making sure the checksum bytes too match it.
  conf.setInt("io.bytes.per.checksum", 512);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  List<DataNodeProperties> props = cluster.dataNodes;
  FileSystem fs = cluster.getFileSystem();
  Path filePath = new Path("/testForgivingExcludedNodes");

  // 256 bytes data chunk for writes
  byte[] bytes = new byte[256];
  for (int index=0; index<bytes.length; index++) {
    bytes[index] = '0';
  }

  // File with a 512 bytes block size
  FSDataOutputStream out = fs.create(filePath, true, 4096, (short) 3, 512);

  // Write a block to all 3 DNs (2x256bytes).
  out.write(bytes);
  out.write(bytes);
  out.hflush();

  // Remove two DNs, to put them into the exclude list.
  DataNodeProperties two = cluster.stopDataNode(2);
  DataNodeProperties one = cluster.stopDataNode(1);

  // Write another block.
  // At this point, we have two nodes already in excluded list.
  out.write(bytes);
  out.write(bytes);
  out.hflush();

  // Bring back the older DNs; they will be forgiven only after
  // this previous block write.
  Assert.assertEquals(true, cluster.restartDataNode(one, true));
  Assert.assertEquals(true, cluster.restartDataNode(two, true));
  cluster.waitActive();

  // Sleep for 5s, to let the excluded nodes be expired
  // from the excludes list (i.e. forgiven after the configured wait period).
  // [Sleeping just in case the restart of the DNs completed < 5s cause
  // otherwise, we'll end up quickly excluding those again.]
  ThreadUtil.sleepAtLeastIgnoreInterrupts(5000);

  // Terminate the last good DN, to assert that there's no
  // single-DN-available scenario, caused by not forgiving the other
  // two by now.
  cluster.stopDataNode(0);

  try {
    // Attempt writing another block, which should still pass
    // cause the previous two should have been forgiven by now,
    // while the last good DN added to excludes this time.
    out.write(bytes);
    out.hflush();
    out.close();
  } catch (Exception e) {
    fail("Excluded DataNodes should be forgiven after a while and " +
         "not cause file writing exception of: '" + e.getMessage() + "'");
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 68, Source: TestDFSClientExcludedNodes.java

Example 11: doWriteOverFailoverTest

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class that the method depends on
private void doWriteOverFailoverTest(TestScenario scenario,
    MethodToTestIdempotence methodToTest) throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  // Don't check replication periodically.
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);
  
  FSDataOutputStream stm = null;
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(3)
    .build();
  try {
    int sizeWritten = 0;
    
    cluster.waitActive();
    cluster.transitionToActive(0);
    Thread.sleep(500);

    LOG.info("Starting with NN 0 active");
    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
    stm = fs.create(TEST_PATH);
    
    // write a block and a half
    AppendTestUtil.write(stm, 0, BLOCK_AND_A_HALF);
    sizeWritten += BLOCK_AND_A_HALF;
    
    // Make sure all of the blocks are written out before failover.
    stm.hflush();

    LOG.info("Failing over to NN 1");
    scenario.run(cluster);

    // NOTE: explicitly do *not* make any further metadata calls
    // to the NN here. The next IPC call should be to allocate the next
    // block. Any other call would notice the failover and not test
    // idempotence of the operation (HDFS-3031)
    
    FSNamesystem ns1 = cluster.getNameNode(1).getNamesystem();
    BlockManagerTestUtil.updateState(ns1.getBlockManager());
    assertEquals(0, ns1.getPendingReplicationBlocks());
    assertEquals(0, ns1.getCorruptReplicaBlocks());
    assertEquals(0, ns1.getMissingBlocksCount());

    // If we're testing allocateBlock()'s idempotence, write another
    // block and a half, so we have to allocate a new block.
    // Otherwise, don't write anything, so our next RPC will be
    // completeFile() if we're testing idempotence of that operation.
    if (methodToTest == MethodToTestIdempotence.ALLOCATE_BLOCK) {
      // write another block and a half
      AppendTestUtil.write(stm, sizeWritten, BLOCK_AND_A_HALF);
      sizeWritten += BLOCK_AND_A_HALF;
    }
    
    stm.close();
    stm = null;
    
    AppendTestUtil.check(fs, TEST_PATH, sizeWritten);
  } finally {
    IOUtils.closeStream(stm);
    cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 64, Source: TestPipelinesFailover.java

Example 12: testLeaseRecoveryAfterFailover

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class that the method depends on
/**
 * Tests lease recovery if a client crashes. This approximates the
 * use case of HBase WALs being recovered after a NN failover.
 */
@Test(timeout=30000)
public void testLeaseRecoveryAfterFailover() throws Exception {
  final Configuration conf = new Configuration();
  // Disable permissions so that another user can recover the lease.
  conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  
  FSDataOutputStream stm = null;
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(3)
    .build();
  try {
    cluster.waitActive();
    cluster.transitionToActive(0);
    Thread.sleep(500);

    LOG.info("Starting with NN 0 active");
    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
    stm = fs.create(TEST_PATH);
    
    // write a block and a half
    AppendTestUtil.write(stm, 0, BLOCK_AND_A_HALF);
    stm.hflush();
    
    LOG.info("Failing over to NN 1");
    
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);
    
    assertTrue(fs.exists(TEST_PATH));

    FileSystem fsOtherUser = createFsAsOtherUser(cluster, conf);
    loopRecoverLease(fsOtherUser, TEST_PATH);
    
    AppendTestUtil.check(fs, TEST_PATH, BLOCK_AND_A_HALF);
    
    // Fail back to ensure that the block locations weren't lost on the
    // original node.
    cluster.transitionToStandby(1);
    cluster.transitionToActive(0);
    AppendTestUtil.check(fs, TEST_PATH, BLOCK_AND_A_HALF);      
  } finally {
    IOUtils.closeStream(stm);
    cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 52, Source: TestPipelinesFailover.java

Example 13: testOpenFileWhenNNAndClientCrashAfterAddBlock

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class that the method depends on
/** Test NN crash and client crash/stuck immediately after block allocation */
@Test(timeout = 100000)
public void testOpenFileWhenNNAndClientCrashAfterAddBlock() throws Exception {
  cluster.getConfiguration(0).set(
      DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "1.0f");
  String testData = "testData";
  // to make sure we write the full block before creating dummy block at NN.
  cluster.getConfiguration(0).setInt("io.bytes.per.checksum",
      testData.length());
  cluster.restartNameNode(0);
  try {
    cluster.waitActive();
    cluster.transitionToActive(0);
    cluster.transitionToStandby(1);
    DistributedFileSystem dfs = cluster.getFileSystem(0);
    String pathString = "/tmp1.txt";
    Path filePath = new Path(pathString);
    FSDataOutputStream create = dfs.create(filePath,
        FsPermission.getDefault(), true, 1024, (short) 3, testData.length(),
        null);
    create.write(testData.getBytes());
    create.hflush();
    long fileId = ((DFSOutputStream)create.
        getWrappedStream()).getFileId();
    FileStatus fileStatus = dfs.getFileStatus(filePath);
    DFSClient client = DFSClientAdapter.getClient(dfs);
    // add one dummy block at NN, but not write to DataNode
    ExtendedBlock previousBlock =
        DFSClientAdapter.getPreviousBlock(client, fileId);
    DFSClientAdapter.getNamenode(client).addBlock(
        pathString,
        client.getClientName(),
        new ExtendedBlock(previousBlock),
        new DatanodeInfo[0],
        DFSClientAdapter.getFileId((DFSOutputStream) create
            .getWrappedStream()), null);
    cluster.restartNameNode(0, true);
    cluster.restartDataNode(0);
    cluster.transitionToActive(0);
    // let the block reports be processed.
    Thread.sleep(2000);
    FSDataInputStream is = dfs.open(filePath);
    is.close();
    dfs.recoverLease(filePath);// initiate recovery
    assertTrue("Recovery also should be success", dfs.recoverLease(filePath));
  } finally {
    cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 50, Source: TestHASafeMode.java

Example 14: testTimeoutMetric

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class that the method depends on
@Test(timeout=60000)
public void testTimeoutMetric() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final Path path = new Path("/test");

  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(2).build();

  final List<FSDataOutputStream> streams = Lists.newArrayList();
  try {
    final FSDataOutputStream out =
        cluster.getFileSystem().create(path, (short) 2);
    final DataNodeFaultInjector injector = Mockito.mock
        (DataNodeFaultInjector.class);
    Mockito.doThrow(new IOException("mock IOException")).
        when(injector).
        writeBlockAfterFlush();
    DataNodeFaultInjector.instance = injector;
    streams.add(out);
    out.writeBytes("old gs data\n");
    out.hflush();

    /* Test the metric. */
    final MetricsRecordBuilder dnMetrics =
        getMetrics(cluster.getDataNodes().get(0).getMetrics().name());
    assertCounter("DatanodeNetworkErrors", 1L, dnMetrics);

    /* Test JMX datanode network counts. */
    final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    final ObjectName mxbeanName =
        new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo");
    final Object dnc =
        mbs.getAttribute(mxbeanName, "DatanodeNetworkCounts");
    final String allDnc = dnc.toString();
    assertTrue("expected to see loopback address",
        allDnc.indexOf("127.0.0.1") >= 0);
    assertTrue("expected to see networkErrors",
        allDnc.indexOf("networkErrors") >= 0);
  } finally {
    IOUtils.cleanup(LOG, streams.toArray(new Closeable[0]));
    if (cluster != null) {
      cluster.shutdown();
    }
    DataNodeFaultInjector.instance = new DataNodeFaultInjector();
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 47, Source: TestDataNodeMetrics.java

Example 15: testRBWReportArrivesAfterEdits

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class that the method depends on
/**
 * Another regression test for HDFS-2742. This tests the following sequence:
 * - DN does a block report while file is open. This BR contains
 *   the block in RBW state.
 * - The block report is delayed in reaching the standby.
 * - The file is closed.
 * - The standby processes the OP_ADD and OP_CLOSE operations before
 *   the RBW block report arrives.
 * - The standby should not mark the block as corrupt.
 */
@Test
public void testRBWReportArrivesAfterEdits() throws Exception {
  final CountDownLatch brFinished = new CountDownLatch(1);
  DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG) {
    @Override
    protected Object passThrough(InvocationOnMock invocation)
        throws Throwable {
      try {
        return super.passThrough(invocation);
      } finally {
        // inform the test that our block report went through.
        brFinished.countDown();
      }
    }
  };

  FSDataOutputStream out = fs.create(TEST_FILE_PATH);
  try {
    AppendTestUtil.write(out, 0, 10);
    out.hflush();

    DataNode dn = cluster.getDataNodes().get(0);
    DatanodeProtocolClientSideTranslatorPB spy =
      DataNodeTestUtils.spyOnBposToNN(dn, nn2);
    
    Mockito.doAnswer(delayer)
      .when(spy).blockReport(
        Mockito.<DatanodeRegistration>anyObject(),
        Mockito.anyString(),
        Mockito.<StorageBlockReport[]>anyObject(),
        Mockito.<BlockReportContext>anyObject());
    dn.scheduleAllBlockReport(0);
    delayer.waitForCall();
    
  } finally {
    IOUtils.closeStream(out);
  }

  cluster.transitionToStandby(0);
  cluster.transitionToActive(1);
  
  delayer.proceed();
  brFinished.await();
  
  // Verify that no replicas are marked corrupt, and that the
  // file is readable from the failed-over standby.
  BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
  BlockManagerTestUtil.updateState(nn2.getNamesystem().getBlockManager());
  assertEquals(0, nn1.getNamesystem().getCorruptReplicaBlocks());
  assertEquals(0, nn2.getNamesystem().getCorruptReplicaBlocks());
  
  DFSTestUtil.readFile(fs, TEST_FILE_PATH);
}
 
Developer ID: naver, Project: hadoop, Lines: 64, Source: TestDNFencing.java


Note: The org.apache.hadoop.fs.FSDataOutputStream.hflush method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their authors; copyright in the source code remains with the original authors, and distribution and use are subject to the corresponding project's License. Do not reproduce without permission.