

Java FSDataOutputStream.hsync Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FSDataOutputStream.hsync, gathered from open-source projects. If you are wondering what FSDataOutputStream.hsync does or how to use it, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.fs.FSDataOutputStream.


Fifteen code examples of the FSDataOutputStream.hsync method are shown below, sorted by popularity by default.
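Before the full examples, here is a minimal, self-contained sketch of the basic call pattern. It assumes a reachable HDFS deployment, and the file path is a placeholder. The key distinction to keep in mind throughout these examples: hflush() only guarantees that written data has reached the DataNodes, while hsync() additionally forces each DataNode to persist the data to its local disk.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HsyncSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path p = new Path("/tmp/hsync-demo.txt"); // placeholder path
    try (FSDataOutputStream out = fs.create(p, true)) {
      out.writeBytes("durable record\n");
      // After hsync() returns, the bytes are on disk at every DataNode
      // in the write pipeline, not merely in their memory buffers.
      out.hsync();
    }
  }
}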

Example 1: testInvalidateOverReplicatedBlock

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class the method depends on
/**
 * Test over replicated block should get invalidated when decreasing the
 * replication for a partial block.
 */
@Test
public void testInvalidateOverReplicatedBlock() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
      .build();
  try {
    final FSNamesystem namesystem = cluster.getNamesystem();
    final BlockManager bm = namesystem.getBlockManager();
    FileSystem fs = cluster.getFileSystem();
    Path p = new Path(MiniDFSCluster.getBaseDirectory(), "/foo1");
    FSDataOutputStream out = fs.create(p, (short) 2);
    out.writeBytes("HDFS-3119: " + p);
    out.hsync();
    fs.setReplication(p, (short) 1);
    out.close();
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, p);
    assertEquals("Expected only one live replica for the block", 1, bm
        .countNodes(block.getLocalBlock()).liveReplicas());
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: TestOverReplicatedBlocks.java

Example 2: syncSlots

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class the method depends on
protected long syncSlots(FSDataOutputStream stream, ByteSlot[] slots, int offset, int count)
    throws IOException {
  long totalSynced = 0;
  for (int i = 0; i < count; ++i) {
    ByteSlot data = slots[offset + i];
    data.writeTo(stream);
    totalSynced += data.size();
  }

  if (useHsync) {
    stream.hsync();
  } else {
    stream.hflush();
  }
  sendPostSyncSignal();

  if (LOG.isTraceEnabled()) {
    LOG.trace("Sync slots=" + count + '/' + slots.length +
              ", flushed=" + StringUtils.humanSize(totalSynced));
  }
  return totalSynced;
}
 
Developer: fengchen8086, Project: ditb, Lines: 23, Source: WALProcedureStore.java
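A note on the design choice in Example 2: hsync() does not return until each DataNode has written the data to disk, whereas hflush() returns as soon as the data has reached the DataNodes' memory. WALProcedureStore makes the choice configurable through its useHsync flag because the stronger durability guarantee of hsync() comes at a noticeable throughput cost.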

Example 3: testRBWFileCreationError

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class the method depends on
@SuppressWarnings("unchecked")
@Test(timeout = 30000)
public void testRBWFileCreationError() throws Exception {

  final short replication = 1;
  startCluster(BLOCK_SIZE, replication, -1);

  final FsVolumeImpl fsVolumeImpl = (FsVolumeImpl) cluster.getDataNodes()
      .get(0).getFSDataset().getVolumes().get(0);
  final String methodName = GenericTestUtils.getMethodName();
  final Path file = new Path("/" + methodName + ".01.dat");

  // Mock BlockPoolSlice so that RBW file creation throws an IOException
  BlockPoolSlice blockPoolSlice = Mockito.mock(BlockPoolSlice.class);
  Mockito.when(blockPoolSlice.createRbwFile((Block) Mockito.any()))
      .thenThrow(new IOException("Synthetic IO Exception Through MOCK"));

  Field field = FsVolumeImpl.class.getDeclaredField("bpSlices");
  field.setAccessible(true);
  Map<String, BlockPoolSlice> bpSlices = (Map<String, BlockPoolSlice>) field
      .get(fsVolumeImpl);
  bpSlices.put(fsVolumeImpl.getBlockPoolList()[0], blockPoolSlice);

  try {
    // Write 1 byte to the file
    FSDataOutputStream os = fs.create(file, replication);
    os.write(new byte[1]);
    os.hsync();
    os.close();
    fail("Expecting IOException file creation failure");
  } catch (IOException e) {
    // Exception can be ignored (expected)
  }

  // Ensure RBW space reserved is released
  assertTrue("Expected ZERO but got " + fsVolumeImpl.getReservedForRbw(),
      fsVolumeImpl.getReservedForRbw() == 0);
}
 
Developer: naver, Project: hadoop, Lines: 39, Source: TestRbwSpaceReservation.java

Example 4: writeToStream

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class the method depends on
public void writeToStream(FSDataOutputStream stream) throws IOException {
  Stopwatch watch = new Stopwatch();
  watch.start();
  available = false;
  check = ThreadLocalRandom.current().nextLong();
  start = stream.getPos();
  logger.debug("Writing check value {} at position {}", check, start);
  stream.writeLong(check);
  batch.getHeader().writeDelimitedTo(stream);
  ByteBuf buf = batch.getBody();
  if (buf != null) {
    bodyLength = buf.capacity();
  } else {
    bodyLength = 0;
  }
  if (bodyLength > 0) {
    buf.getBytes(0, stream, bodyLength);
  }
  stream.hsync();
  FileStatus status = fs.getFileStatus(path);
  long len = status.getLen();
  logger.debug("After spooling batch, stream at position {}. File length {}", stream.getPos(), len);
  batch.sendOk();
  latch.countDown();
  long t = watch.elapsed(TimeUnit.MICROSECONDS);
  logger.debug("Took {} us to spool {} to disk. Rate {} mb/s", t, bodyLength, bodyLength / t);
  if (buf != null) {
    buf.release();
  }
}
 
Developer: skhalifa, Project: QDrill, Lines: 31, Source: SpoolingRawBatchBuffer.java

Example 5: writeTestFile

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class the method depends on
private void writeTestFile(String testFileName) throws Exception {
  Path filePath = new Path(testFileName);
  FSDataOutputStream stream = dfs.create(filePath);
  for (int i = 0; i < 10; i++) {
    byte[] data = RandomStringUtils.randomAlphabetic(102400).getBytes();
    stream.write(data);
  }
  stream.hsync();
  stream.close();
}
 
Developer: naver, Project: hadoop, Lines: 11, Source: TestTracing.java

Example 6: testDeleteAddBlockRace

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class the method depends on
private void testDeleteAddBlockRace(boolean hasSnapshot) throws Exception {
  try {
    conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
        SlowBlockPlacementPolicy.class, BlockPlacementPolicy.class);
    cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
    final String fileName = "/testDeleteAddBlockRace";
    Path filePath = new Path(fileName);

    FSDataOutputStream out = null;
    out = fs.create(filePath);
    if (hasSnapshot) {
      SnapshotTestHelper.createSnapshot((DistributedFileSystem) fs, new Path(
          "/"), "s1");
    }

    Thread deleteThread = new DeleteThread(fs, filePath);
    deleteThread.start();

    try {
      // write data and sync to make sure a block is allocated.
      out.write(new byte[32], 0, 32);
      out.hsync();
      Assert.fail("Should have failed.");
    } catch (FileNotFoundException e) {
      GenericTestUtils.assertExceptionContains(filePath.getName(), e);
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 34, Source: TestDeleteRace.java

Example 7: makeTestFile

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class the method depends on
protected final void makeTestFile(Path path, long length,
    boolean isLazyPersist) throws IOException {

  EnumSet<CreateFlag> createFlags = EnumSet.of(CREATE);

  if (isLazyPersist) {
    createFlags.add(LAZY_PERSIST);
  }

  FSDataOutputStream fos = null;
  try {
    fos =
        fs.create(path,
            FsPermission.getFileDefault(),
            createFlags,
            BUFFER_LENGTH,
            REPL_FACTOR,
            BLOCK_SIZE,
            null);

    // Allocate a block.
    byte[] buffer = new byte[BUFFER_LENGTH];
    for (int bytesWritten = 0; bytesWritten < length; ) {
      fos.write(buffer, 0, buffer.length);
      bytesWritten += buffer.length;
    }
    if (length > 0) {
      fos.hsync();
    }
  } finally {
    IOUtils.closeQuietly(fos);
  }
}
 
Developer: naver, Project: hadoop, Lines: 34, Source: LazyPersistTestCase.java

Example 8: testSpaceReleasedOnUnexpectedEof

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class the method depends on
/**
 * Ensure that reserved space is released when the client goes away
 * unexpectedly.
 *
 * The verification is done for each replica in the write pipeline.
 *
 * @throws IOException
 */
@Test(timeout=300000)
public void testSpaceReleasedOnUnexpectedEof()
    throws IOException, InterruptedException, TimeoutException {
  final short replication = 3;
  startCluster(BLOCK_SIZE, replication, -1);

  final String methodName = GenericTestUtils.getMethodName();
  final Path file = new Path("/" + methodName + ".01.dat");

  // Write 1 byte to the file and kill the writer.
  FSDataOutputStream os = fs.create(file, replication);
  os.write(new byte[1]);
  os.hsync();
  DFSTestUtil.abortStream((DFSOutputStream) os.getWrappedStream());

  // Ensure all space reserved for the replica was released on each
  // DataNode.
  for (DataNode dn : cluster.getDataNodes()) {
    final FsVolumeImpl volume = (FsVolumeImpl) dn.getFSDataset().getVolumes().get(0);
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return (volume.getReservedForRbw() == 0);
      }
    }, 500, Integer.MAX_VALUE); // Wait until the test times out.
  }
}
 
Developer: naver, Project: hadoop, Lines: 36, Source: TestRbwSpaceReservation.java

Example 9: testHSyncBlockBoundary

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class the method depends on
/** Test hsync on an exact block boundary */
@Test
public void testHSyncBlockBoundary() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  final FileSystem fs = cluster.getFileSystem();
  
  final Path p = new Path("/testHSyncBlockBoundary/foo");
  final int len = 1 << 16;
  final byte[] fileContents = AppendTestUtil.initBuffer(len);
  FSDataOutputStream out = fs.create(p, FsPermission.getDefault(),
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK),
      4096, (short) 1, len, null);
  // fill exactly one block (tests the SYNC_BLOCK case) and flush
  out.write(fileContents, 0, len);
  out.hflush();
  // the full block should have caused a sync
  checkSyncMetric(cluster, 1);
  out.hsync();
  // hsync at the same block boundary should not sync again
  checkSyncMetric(cluster, 1);
  // write one more byte and sync again
  out.write(1);
  out.hsync();
  checkSyncMetric(cluster, 2);
  out.close();
  checkSyncMetric(cluster, 3);
  cluster.shutdown();
}
 
Developer: naver, Project: hadoop, Lines: 30, Source: TestHSync.java
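Note the role of CreateFlag.SYNC_BLOCK in Example 9: a file created with that flag syncs automatically whenever a block fills up, so the explicit hsync() issued right at the block boundary does not increment the sync metric a second time; only the hsync() after writing one more byte does.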

Example 10: testHSyncWithReplication

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class the method depends on
/** Test that syncBlock is correctly performed at replicas */
@Test
public void testHSyncWithReplication() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  final FileSystem fs = cluster.getFileSystem();

  final Path p = new Path("/testHSyncWithReplication/foo");
  final int len = 1 << 16;
  FSDataOutputStream out = fs.create(p, FsPermission.getDefault(),
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK),
      4096, (short) 3, len, null);
  out.write(1);
  out.hflush();
  checkSyncMetric(cluster, 0, 0);
  checkSyncMetric(cluster, 1, 0);
  checkSyncMetric(cluster, 2, 0);
  out.hsync();
  checkSyncMetric(cluster, 0, 1);
  checkSyncMetric(cluster, 1, 1);
  checkSyncMetric(cluster, 2, 1);
  out.hsync();
  checkSyncMetric(cluster, 0, 2);
  checkSyncMetric(cluster, 1, 2);
  checkSyncMetric(cluster, 2, 2);
  cluster.shutdown();
}
 
Developer: naver, Project: hadoop, Lines: 28, Source: TestHSync.java

Example 11: testReceivePacketMetrics

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class the method depends on
@Test
public void testReceivePacketMetrics() throws Exception {
  Configuration conf = new HdfsConfiguration();
  final int interval = 1;
  conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();

    Path testFile = new Path("/testFlushNanosMetric.txt");
    FSDataOutputStream fout = fs.create(testFile);
    fout.write(new byte[1]);
    fout.hsync();
    fout.close();
    List<DataNode> datanodes = cluster.getDataNodes();
    DataNode datanode = datanodes.get(0);
    MetricsRecordBuilder dnMetrics = getMetrics(datanode.getMetrics().name());
    // Expect two flushes: one after the write, and one on closing
    // the data and metadata files.
    assertCounter("FlushNanosNumOps", 2L, dnMetrics);
    // Expect two syncs, one from the hsync, one on close.
    assertCounter("FsyncNanosNumOps", 2L, dnMetrics);
    // Wait for at least 1 rollover
    Thread.sleep((interval + 1) * 1000);
    // Check the receivePacket percentiles that should be non-zero
    String sec = interval + "s";
    assertQuantileGauges("FlushNanos" + sec, dnMetrics);
    assertQuantileGauges("FsyncNanos" + sec, dnMetrics);
  } finally {
    if (cluster != null) {cluster.shutdown();}
  }
}
 
Developer: naver, Project: hadoop, Lines: 34, Source: TestDataNodeMetrics.java

Example 12: writeToStream

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class the method depends on
public void writeToStream(FSDataOutputStream stream) throws IOException {
  Stopwatch watch = Stopwatch.createStarted();
  ByteBuf buf = null;
  try {
    check = ThreadLocalRandom.current().nextLong();
    start = stream.getPos();
    logger.debug("Writing check value {} at position {}", check, start);
    stream.writeLong(check);
    batch.getHeader().writeDelimitedTo(stream);
    buf = batch.getBody();
    if (buf != null) {
      bodyLength = buf.capacity();
    } else {
      bodyLength = 0;
    }
    if (bodyLength > 0) {
      buf.getBytes(0, stream, bodyLength);
    }
    stream.hsync();
    FileStatus status = spillFile.getFileStatus();
    long len = status.getLen();
    logger.debug("After spooling batch, stream at position {}. File length {}", stream.getPos(), len);
    long t = watch.elapsed(TimeUnit.MICROSECONDS);
    logger.debug("Took {} us to spool {} to disk. Rate {} mb/s", t, bodyLength, bodyLength / t);
  } finally {
    // even if the try block throws an exception we still want to send an ACK and release the lock
    // the caller will add the exception to deferred attribute and it will be thrown when the poll() method is called
    try {
      batch.sendOk(); // this can also throw an exception
    } finally {
      state = BatchState.SPILLED;
      batch = null;
      if (buf != null) {
        buf.release();
      }
    }
  }
}
 
Developer: dremio, Project: dremio-oss, Lines: 39, Source: SpoolingRawBatchBuffer.java

Example 13: newFilesystemChannel

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class the method depends on
public static HadoopFileChannel newFilesystemChannel(String filePath, Configuration conf) {
    Path path = new Path(filePath);
    FileSystem fs;
    FSDataOutputStream outputStream;
    try {
        fs = path.getFileSystem(conf);
        outputStream = fs.create(path, true, 1024);
    } catch (IOException e) {
        LOGGER.error(e.getMessage());
        throw new RuntimeException(e.getCause());
    }

    return new HadoopFileChannel()
    {
        @Override
        public boolean isOpen()
        {
            return true;
        }

        @Override
        public int write(ByteBuffer src) throws IOException
        {
            int toWrite = src.remaining();

            if (src.hasArray())
            {
                outputStream.write(src.array(), src.arrayOffset() + src.position(), src.remaining());
                src.position(src.limit());
                return toWrite;
            }

            if (toWrite < 16)
            {
                int offset = src.position();
                for (int i = 0 ; i < toWrite ; i++)
                    outputStream.write(src.get(i + offset));
                src.position(src.limit());
                return toWrite;
            }

            byte[] buf = retrieveTemporaryBuffer(toWrite);

            int totalWritten = 0;
            while (totalWritten < toWrite)
            {
                int toWriteThisTime = Math.min(buf.length, toWrite - totalWritten);

                org.apache.cassandra.utils.ByteBufferUtil.arrayCopy(src, src.position() + totalWritten, buf, 0, toWriteThisTime);

                outputStream.write(buf, 0, toWriteThisTime);

                totalWritten += toWriteThisTime;
            }

            src.position(src.limit());
            return totalWritten;
        }

        public void flush() throws IOException {
            outputStream.hsync();
        }

        @Override
        public void close() throws IOException {
            flush();
            outputStream.close();
        }
    };
}
 
Developer: Netflix, Project: sstable-adaptor, Lines: 71, Source: HadoopFileUtils.java

Example 14: testBlockRecoveryWithLessMetafile

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class the method depends on
/**
 * Block recovery when the meta file does not have CRCs for all chunks
 * in the block file.
 */
@Test
public void testBlockRecoveryWithLessMetafile() throws Exception {
  Configuration conf = new Configuration();
  conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
      UserGroupInformation.getCurrentUser().getShortUserName());
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  Path file = new Path("/testRecoveryFile");
  DistributedFileSystem dfs = cluster.getFileSystem();
  FSDataOutputStream out = dfs.create(file);
  int count = 0;
  while (count < 2 * 1024 * 1024) {
    out.writeBytes("Data");
    count += 4;
  }
  out.hsync();
  // abort the original stream
  ((DFSOutputStream) out.getWrappedStream()).abort();

  LocatedBlocks locations = cluster.getNameNodeRpc().getBlockLocations(
      file.toString(), 0, count);
  ExtendedBlock block = locations.get(0).getBlock();
  DataNode dn = cluster.getDataNodes().get(0);
  BlockLocalPathInfo localPathInfo = dn.getBlockLocalPathInfo(block, null);
  File metafile = new File(localPathInfo.getMetaPath());
  assertTrue(metafile.exists());

  // reduce the block meta file size
  RandomAccessFile raf = new RandomAccessFile(metafile, "rw");
  raf.setLength(metafile.length() - 20);
  raf.close();

  // restart the DN so the replica transitions to the RWR state
  DataNodeProperties dnProp = cluster.stopDataNode(0);
  cluster.restartDataNode(dnProp, true);

  // try to recover the lease
  DistributedFileSystem newdfs = (DistributedFileSystem) FileSystem
      .newInstance(cluster.getConfiguration(0));
  count = 0;
  while (++count < 10 && !newdfs.recoverLease(file)) {
    Thread.sleep(1000);
  }
  assertTrue("File should be closed", newdfs.recoverLease(file));

}
 
Developer: naver, Project: hadoop, Lines: 50, Source: TestLeaseRecovery.java

Example 15: testReloadOnEditReplayFailure

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class the method depends on
@Test(timeout=30000)
public void testReloadOnEditReplayFailure () throws IOException {
  Configuration conf = new HdfsConfiguration();
  FSDataOutputStream fos = null;
  SecondaryNameNode secondary = null;
  MiniDFSCluster cluster = null;
  FileSystem fs = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
        .build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    secondary = startSecondaryNameNode(conf);
    fos = fs.create(new Path("tmpfile0"));
    fos.write(new byte[] { 0, 1, 2, 3 });
    secondary.doCheckpoint();
    fos.write(new byte[] { 0, 1, 2, 3 });
    fos.hsync();

    // Cause merge to fail in next checkpoint.
    Mockito.doThrow(new IOException(
        "Injecting failure during merge"))
        .when(faultInjector).duringMerge();

    try {
      secondary.doCheckpoint();
      fail("Fault injection failed.");
    } catch (IOException ioe) {
      // This is expected.
    } 
    Mockito.reset(faultInjector);
 
    // The error must be recorded, so next checkpoint will reload image.
    fos.write(new byte[] { 0, 1, 2, 3 });
    fos.hsync();
    
    assertTrue("Another checkpoint should have reloaded image",
        secondary.doCheckpoint());
  } finally {
    if (fs != null) {
      fs.close();
    }
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
    Mockito.reset(faultInjector);
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:51,代码来源:TestCheckpoint.java


Note: the org.apache.hadoop.fs.FSDataOutputStream.hsync examples in this article were compiled from open-source code and documentation platforms such as GitHub and MSDocs; the snippets were selected from open-source projects contributed by their respective developers. Copyright of the source code remains with the original authors; before redistributing or reusing the code, consult the license of the corresponding project. Do not reproduce without permission.