

Java HdfsDataOutputStream.hsync Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.client.HdfsDataOutputStream.hsync, drawn from open-source projects. If you are wondering what HdfsDataOutputStream.hsync does and how to use it, the curated examples below should help. You can also explore other usage examples of org.apache.hadoop.hdfs.client.HdfsDataOutputStream.


Six code examples of HdfsDataOutputStream.hsync are shown below, sorted by popularity.
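Before the examples, a quick note on what the method does: a plain hsync() persists buffered data on the DataNodes, while the SyncFlag.UPDATE_LENGTH flag additionally updates the file length on the NameNode, so readers can observe the new length before the stream is closed. Below is a minimal standalone sketch of that pattern; the path /tmp/hsync-demo and the assumption that fs.defaultFS points at an HDFS cluster are illustrative only (on HDFS, create() returns an HdfsDataOutputStream, so the cast is safe):

import java.nio.charset.StandardCharsets;
import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;

public class HsyncDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);      // assumes fs.defaultFS is an HDFS URI
    Path file = new Path("/tmp/hsync-demo");   // hypothetical path

    HdfsDataOutputStream out = (HdfsDataOutputStream) fs.create(file);
    out.write("some bytes".getBytes(StandardCharsets.UTF_8));

    // Persist the data on the DataNodes and push the new length to the
    // NameNode so that other clients see it before close().
    out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));

    // The length is already visible even though the stream is still open.
    System.out.println("visible length: " + fs.getFileStatus(file).getLen());

    out.close();
    fs.close();
  }
}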

Example 1: testLease

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the package/class this method depends on
@Test
public void testLease() throws Exception {
  try {
    NameNodeAdapter.setLeasePeriod(fsn, 100, 200);
    final Path foo = new Path(dir, "foo");
    final Path bar = new Path(foo, "bar");
    DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0);
    HdfsDataOutputStream out = appendFileWithoutClosing(bar, 100);
    out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");

    hdfs.delete(foo, true);
    Thread.sleep(1000);
    try {
      fsn.writeLock();
      NameNodeAdapter.getLeaseManager(fsn).runLeaseChecks();
    } finally {
      fsn.writeUnlock();
    }
  } finally {
    NameNodeAdapter.setLeasePeriod(fsn, HdfsConstants.LEASE_SOFTLIMIT_PERIOD,
        HdfsConstants.LEASE_HARDLIMIT_PERIOD);
  }
}
 
Contributor: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 25, Source: TestINodeFileUnderConstructionWithSnapshot.java
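All of the examples on this page call a test helper, appendFileWithoutClosing(Path, int), that is not reproduced here. Judging from the call sites, it appends the requested number of bytes to the file and returns the still-open stream, leaving the file under construction on the NameNode. A minimal sketch of such a helper, assuming (as in the tests above) that hdfs is the DistributedFileSystem under test and random is a java.util.Random field; the field names are inferred, not confirmed by this page:

// Appends `length` random bytes to `file` and deliberately returns without
// closing the stream, so the file stays under construction on the NameNode.
private HdfsDataOutputStream appendFileWithoutClosing(Path file, int length)
    throws IOException {
  byte[] toAppend = new byte[length];
  random.nextBytes(toAppend);
  HdfsDataOutputStream out = (HdfsDataOutputStream) hdfs.append(file);
  out.write(toAppend);
  return out;
}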

Example 2: testLoadImageWithAppending

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the package/class this method depends on
/**
 * Test fsimage loading while there is a file under construction.
 */
@Test (timeout=60000)
public void testLoadImageWithAppending() throws Exception {
  Path sub1 = new Path(dir, "sub1");
  Path sub1file1 = new Path(sub1, "sub1file1");
  Path sub1file2 = new Path(sub1, "sub1file2");
  DFSTestUtil.createFile(hdfs, sub1file1, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, sub1file2, BLOCKSIZE, REPLICATION, seed);
  
  hdfs.allowSnapshot(dir);
  hdfs.createSnapshot(dir, "s0");
  
  HdfsDataOutputStream out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  
  // save namespace and restart cluster
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(false)
      .numDataNodes(REPLICATION).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();
}
 
Contributor: naver, Project: hadoop, Lines: 30, Source: TestFSImageWithSnapshot.java

Example 3: modify

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the package/class this method depends on
@Override
void modify() throws Exception {
  assertTrue(fs.exists(file));
  byte[] toAppend = new byte[appendLen];
  random.nextBytes(toAppend);

  out = (HdfsDataOutputStream)fs.append(file);
  out.write(toAppend);
  out.hsync(EnumSet.of(HdfsDataOutputStream.SyncFlag.UPDATE_LENGTH));
}
 
Contributor: naver, Project: hadoop, Lines: 11, Source: TestSnapshot.java

Example 4: testSaveLoadImageWithAppending

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the package/class this method depends on
/**
 * Test fsimage saving/loading while a file append is in progress.
 */
@Test (timeout=60000)
public void testSaveLoadImageWithAppending() throws Exception {
  Path sub1 = new Path(dir, "sub1");
  Path sub1file1 = new Path(sub1, "sub1file1");
  Path sub1file2 = new Path(sub1, "sub1file2");
  DFSTestUtil.createFile(hdfs, sub1file1, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, sub1file2, BLOCKSIZE, REPLICATION, seed);
  
  // 1. create snapshot s0
  hdfs.allowSnapshot(dir);
  hdfs.createSnapshot(dir, "s0");
  
  // 2. create snapshot s1 before appending sub1file1 finishes
  HdfsDataOutputStream out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  // also append sub1file2
  DFSTestUtil.appendFile(hdfs, sub1file2, BLOCKSIZE);
  hdfs.createSnapshot(dir, "s1");
  out.close();
  
  // 3. create snapshot s2 before appending finishes
  out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  hdfs.createSnapshot(dir, "s2");
  out.close();
  
  // 4. save fsimage before appending finishes
  out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  // dump fsdir
  File fsnBefore = dumpTree2File("before");
  // save the namesystem to a temp file
  File imageFile = saveFSImageToTempFile();
  
  // 5. load fsimage and compare
  // first restart the cluster, and format the cluster
  out.close();
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(true)
      .numDataNodes(REPLICATION).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();
  // then load the fsimage
  loadFSImageFromTempFile(imageFile);
  
  // dump the fsdir tree again
  File fsnAfter = dumpTree2File("after");
  
  // compare two dumped tree
  SnapshotTestHelper.compareDumpedTreeInFile(fsnBefore, fsnAfter, true);
}
 
Contributor: naver, Project: hadoop, Lines: 56, Source: TestFSImageWithSnapshot.java

Example 5: testSnapshotWhileAppending

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the package/class this method depends on
/**
 * Test taking a snapshot during a file append, before the corresponding
 * {@link FSDataOutputStream} instance is closed.
 */
@Test (timeout=60000)
public void testSnapshotWhileAppending() throws Exception {
  Path file = new Path(dir, "file");
  DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
  
  // 1. append without closing stream --> create snapshot
  HdfsDataOutputStream out = appendFileWithoutClosing(file, BLOCKSIZE);
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
  out.close();
  
  // check: an INodeFileUnderConstructionWithSnapshot should be stored into s0's
  // deleted list, with size BLOCKSIZE*2
  INodeFile fileNode = (INodeFile) fsdir.getINode(file.toString());
  assertEquals(BLOCKSIZE * 2, fileNode.computeFileSize());
  INodeDirectorySnapshottable dirNode = (INodeDirectorySnapshottable) fsdir
      .getINode(dir.toString());
  DirectoryDiff last = dirNode.getDiffs().getLast();
  Snapshot s0 = last.snapshot;
  
  // 2. append without closing stream
  out = appendFileWithoutClosing(file, BLOCKSIZE);
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  
  // re-check nodeInDeleted_S0
  dirNode = (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
  assertEquals(BLOCKSIZE * 2, fileNode.computeFileSize(s0));
  
  // 3. take snapshot --> close stream
  hdfs.createSnapshot(dir, "s1");
  out.close();
  
  // check: an INodeFileUnderConstructionWithSnapshot with size BLOCKSIZE*3 should
  // have been stored in s1's deleted list
  fileNode = (INodeFile) fsdir.getINode(file.toString());
  dirNode = (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
  last = dirNode.getDiffs().getLast();
  Snapshot s1 = last.snapshot;
  assertTrue(fileNode instanceof INodeFileWithSnapshot);
  assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(s1));
  
  // 4. modify file --> append without closing stream --> take snapshot -->
  // close stream
  hdfs.setReplication(file, (short) (REPLICATION - 1));
  out = appendFileWithoutClosing(file, BLOCKSIZE);
  hdfs.createSnapshot(dir, "s2");
  out.close();
  
  // re-check the size of nodeInDeleted_S1
  assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(s1));
}
 
Contributor: chendave, Project: hadoop-TCP, Lines: 56, Source: TestINodeFileUnderConstructionWithSnapshot.java

Example 6: testSnapshotWhileAppending

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the package/class this method depends on
/**
 * Test taking a snapshot during a file append, before the corresponding
 * {@link FSDataOutputStream} instance is closed.
 */
@Test (timeout=60000)
public void testSnapshotWhileAppending() throws Exception {
  Path file = new Path(dir, "file");
  DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
  
  // 1. append without closing stream --> create snapshot
  HdfsDataOutputStream out = appendFileWithoutClosing(file, BLOCKSIZE);
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
  out.close();
  
  // check: an INodeFileUnderConstructionWithSnapshot should be stored into s0's
  // deleted list, with size BLOCKSIZE*2
  INodeFile fileNode = (INodeFile) fsdir.getINode(file.toString());
  assertEquals(BLOCKSIZE * 2, fileNode.computeFileSize());
  INodeDirectorySnapshottable dirNode = (INodeDirectorySnapshottable) fsdir
      .getINode(dir.toString());
  DirectoryDiff last = dirNode.getDiffs().getLast();
  
  // 2. append without closing stream
  out = appendFileWithoutClosing(file, BLOCKSIZE);
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  
  // re-check nodeInDeleted_S0
  dirNode = (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
  assertEquals(BLOCKSIZE * 2, fileNode.computeFileSize(last.getSnapshotId()));
  
  // 3. take snapshot --> close stream
  hdfs.createSnapshot(dir, "s1");
  out.close();
  
  // check: an INodeFileUnderConstructionWithSnapshot with size BLOCKSIZE*3 should
  // have been stored in s1's deleted list
  fileNode = (INodeFile) fsdir.getINode(file.toString());
  dirNode = (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
  last = dirNode.getDiffs().getLast();
  assertTrue(fileNode.isWithSnapshot());
  assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(last.getSnapshotId()));
  
  // 4. modify file --> append without closing stream --> take snapshot -->
  // close stream
  hdfs.setReplication(file, (short) (REPLICATION - 1));
  out = appendFileWithoutClosing(file, BLOCKSIZE);
  hdfs.createSnapshot(dir, "s2");
  out.close();
  
  // re-check the size of nodeInDeleted_S1
  assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(last.getSnapshotId()));
}
 
Contributor: Seagate, Project: hadoop-on-lustre2, Lines: 54, Source: TestINodeFileUnderConstructionWithSnapshot.java


Note: the org.apache.hadoop.hdfs.client.HdfsDataOutputStream.hsync examples above were compiled from open-source code and documentation platforms such as GitHub and MSDocs. Copyright of the code snippets remains with their original authors; for distribution and use, refer to each project's License. Do not reproduce without permission.