

Java DFSOutputStream Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.DFSOutputStream. If you have been wondering what exactly DFSOutputStream does, how to use it, or where to find usage examples, the curated class code examples below may help.


The DFSOutputStream class belongs to the org.apache.hadoop.hdfs package. Fifteen code examples of the class are shown below, sorted by popularity by default.
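Most of the examples below share a single pattern: an application-level FSDataOutputStream is unwrapped via getWrappedStream() and cast to DFSOutputStream to reach HDFS-specific operations such as hsync. The following is a minimal, hedged sketch of that pattern rather than code from any of the examples; the path, configuration, and data are assumptions.

import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;

public class DFSOutputStreamSketch {
  public static void main(String[] args) throws Exception {
    // assumes fs.defaultFS points at an HDFS cluster, so create()
    // actually returns a stream that wraps a DFSOutputStream
    FileSystem fs = FileSystem.get(new Configuration());
    try (FSDataOutputStream out = fs.create(new Path("/tmp/demo"))) {
      out.writeBytes("some data");
      // unwrap the HDFS-specific stream and persist the visible file
      // length on the NameNode without closing the file
      ((DFSOutputStream) out.getWrappedStream()).hsync(
          EnumSet.of(SyncFlag.UPDATE_LENGTH));
    }
  }
}

Note that for a file inside an HDFS encryption zone the wrapped stream is a CryptoOutputStream rather than a DFSOutputStream, which is why Example 6 below unwraps twice.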

Example 1: invoke

import org.apache.hadoop.hdfs.DFSOutputStream; // import the required package/class
@Override
void invoke() throws Exception {
  DatanodeInfo[] newNodes = new DatanodeInfo[2];
  newNodes[0] = nodes[0];
  newNodes[1] = nodes[1];
  String[] storageIDs = {"s0", "s1"};
  
  client.getNamenode().updatePipeline(client.getClientName(), oldBlock,
      newBlock, newNodes, storageIDs);
  // close() can fail if out.close() commits the block after block-received
  // notifications arrive from the DataNodes.
  // Since the datanodes and the output stream still have old genstamps,
  // these blocks will be marked as corrupt after HDFS-5723 if the RECEIVED
  // notifications reach the namenode first, and close() will fail.
  DFSTestUtil.abortStream((DFSOutputStream) out.getWrappedStream());
}
 
Developer ID: naver, Project: hadoop, Lines of code: 17, Source: TestRetryCacheWithHA.java

Example 2: testAppendFileAfterRenameInSnapshot

import org.apache.hadoop.hdfs.DFSOutputStream; // import the required package/class
/**
 * Similar to testRenameUCFileInSnapshot, but renames first and then
 * appends to the file without closing it. Unit test for HDFS-5425.
 */
@Test
public void testAppendFileAfterRenameInSnapshot() throws Exception {
  final Path test = new Path("/test");
  final Path foo = new Path(test, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  SnapshotTestHelper.createSnapshot(hdfs, test, "s0");
  // rename bar --> bar2
  final Path bar2 = new Path(foo, "bar2");
  hdfs.rename(bar, bar2);
  // append to the file and keep it under construction.
  FSDataOutputStream out = hdfs.append(bar2);
  out.writeByte(0);
  ((DFSOutputStream) out.getWrappedStream()).hsync(
      EnumSet.of(SyncFlag.UPDATE_LENGTH));

  // save namespace and restart
  restartClusterAndCheckImage(true);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 24, Source: TestRenameWithSnapshots.java

Example 3: invoke

import org.apache.hadoop.hdfs.DFSOutputStream; // import the required package/class
@Override
void invoke() throws Exception {
  DatanodeInfo[] newNodes = new DatanodeInfo[2];
  newNodes[0] = nodes[0];
  newNodes[1] = nodes[1];
  final DatanodeManager dm = cluster.getNamesystem(0).getBlockManager()
      .getDatanodeManager();
  final String storageID1 = dm.getDatanode(newNodes[0]).getStorageInfos()[0]
      .getStorageID();
  final String storageID2 = dm.getDatanode(newNodes[1]).getStorageInfos()[0]
      .getStorageID();
  String[] storageIDs = {storageID1, storageID2};
  
  client.getNamenode().updatePipeline(client.getClientName(), oldBlock,
      newBlock, newNodes, storageIDs);
  // close() can fail if out.close() commits the block after block-received
  // notifications arrive from the DataNodes.
  // Since the datanodes and the output stream still have old genstamps,
  // these blocks will be marked as corrupt after HDFS-5723 if the RECEIVED
  // notifications reach the namenode first, and close() will fail.
  DFSTestUtil.abortStream((DFSOutputStream) out.getWrappedStream());
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 23, Source: TestRetryCacheWithHA.java

Example 4: testBlocksScheduledCounter

import org.apache.hadoop.hdfs.DFSOutputStream; // import the required package/class
public void testBlocksScheduledCounter() throws IOException {
  
  MiniDFSCluster cluster = new MiniDFSCluster(new Configuration(), 1, 
                                              true, null);
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  
  // open a file and write a few bytes:
  FSDataOutputStream out = fs.create(new Path("/testBlockScheduledCounter"));
  for (int i=0; i<1024; i++) {
    out.write(i);
  }
  // flush to make sure a block is allocated.
  ((DFSOutputStream)(out.getWrappedStream())).sync();
  
  ArrayList<DatanodeDescriptor> dnList = new ArrayList<DatanodeDescriptor>();
  cluster.getNameNode().namesystem.DFSNodesStatus(dnList, dnList);
  DatanodeDescriptor dn = dnList.get(0);
  
  assertEquals(1, dn.getBlocksScheduled());
 
  // close the file and the counter should go to zero.
  out.close();   
  assertEquals(0, dn.getBlocksScheduled());
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines of code: 26, Source: TestBlocksScheduledCounter.java
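Note that this example comes from the hadoop-EAR fork, where DFSOutputStream still exposed the old Syncable sync() method. In recent Hadoop versions the same flush is expressed through the public FSDataOutputStream API; a hedged one-line equivalent, assuming the same out variable as above:

out.hflush(); // flush buffered data to the DataNodes so a block is allocated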

Example 5: createInternal

import org.apache.hadoop.hdfs.DFSOutputStream; // import the required package/class
@Override
public HdfsDataOutputStream createInternal(Path f,
    EnumSet<CreateFlag> createFlag, FsPermission absolutePermission,
    int bufferSize, short replication, long blockSize, Progressable progress,
    ChecksumOpt checksumOpt, boolean createParent) throws IOException {

  final DFSOutputStream dfsos = dfs.primitiveCreate(getUriPath(f),
    absolutePermission, createFlag, createParent, replication, blockSize,
    progress, bufferSize, checksumOpt);
  return dfs.createWrappedOutputStream(dfsos, statistics,
      dfsos.getInitialLen());
}
 
Developer ID: naver, Project: hadoop, Lines of code: 13, Source: Hdfs.java
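Applications do not call createInternal directly; it is reached through the FileContext API. The following is a hedged sketch of that public path; the configuration and file path are assumptions.

import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;

public class FileContextCreateSketch {
  public static void main(String[] args) throws Exception {
    // assumes fs.defaultFS points at an HDFS cluster, so the Hdfs
    // AbstractFileSystem (and the createInternal above) is selected
    FileContext fc = FileContext.getFileContext(new Configuration());
    FSDataOutputStream out = fc.create(
        new Path("/tmp/fc-demo"),
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
        Options.CreateOpts.createParent());
    out.writeBytes("hello");
    out.close();
  }
}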

Example 6: hsync

import org.apache.hadoop.hdfs.DFSOutputStream; // import the required package/class
/**
 * Sync buffered data to the DataNodes (flush to disk devices).
 *
 * @param syncFlags
 *          Indicates the detailed semantics and actions of the hsync.
 * @throws IOException
 * @see FSDataOutputStream#hsync()
 */
public void hsync(EnumSet<SyncFlag> syncFlags) throws IOException {
  OutputStream wrappedStream = getWrappedStream();
  if (wrappedStream instanceof CryptoOutputStream) {
    ((CryptoOutputStream) wrappedStream).flush();
    wrappedStream = ((CryptoOutputStream) wrappedStream).getWrappedStream();
  }
  ((DFSOutputStream) wrappedStream).hsync(syncFlags);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 17, Source: HdfsDataOutputStream.java
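The CryptoOutputStream unwrapping above matters for files inside HDFS encryption zones, where getWrappedStream() returns a CryptoOutputStream instead of a DFSOutputStream. A hedged usage sketch of this helper follows; the cast assumes the stream was produced by a DistributedFileSystem, and the path is an assumption.

import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;

public class HsyncFlagsSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    try (FSDataOutputStream out = fs.create(new Path("/tmp/hsync-demo"))) {
      out.writeBytes("partial data");
      // works whether or not the file is in an encryption zone, because
      // hsync(EnumSet) strips a CryptoOutputStream layer when present
      ((HdfsDataOutputStream) out).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    }
  }
}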

Example 7: testUpdateQuotaForFSync

import org.apache.hadoop.hdfs.DFSOutputStream; // import the required package/class
/**
 * Test whether the quota is correctly updated when the file length is
 * updated through fsync.
 */
@Test (timeout=60000)
public void testUpdateQuotaForFSync() throws Exception {
  final Path foo = new Path("/foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(dfs, bar, BLOCKSIZE, REPLICATION, 0L);
  dfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);

  FSDataOutputStream out = dfs.append(bar);
  out.write(new byte[BLOCKSIZE / 4]);
  ((DFSOutputStream) out.getWrappedStream()).hsync(EnumSet.of(HdfsDataOutputStream.SyncFlag.UPDATE_LENGTH));

  INodeDirectory fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
  QuotaCounts quota = fooNode.getDirectoryWithQuotaFeature()
      .getSpaceConsumed();
  long ns = quota.getNameSpace();
  long ds = quota.getStorageSpace();
  assertEquals(2, ns); // foo and bar
  assertEquals(BLOCKSIZE * 2 * REPLICATION, ds); // file is under construction

  out.write(new byte[BLOCKSIZE / 4]);
  out.close();

  fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
  quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  ns = quota.getNameSpace();
  ds = quota.getStorageSpace();
  assertEquals(2, ns);
  assertEquals((BLOCKSIZE + BLOCKSIZE / 2) * REPLICATION, ds);

  // append another block
  DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE);

  quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  ns = quota.getNameSpace();
  ds = quota.getStorageSpace();
  assertEquals(2, ns); // foo and bar
  assertEquals((BLOCKSIZE * 2 + BLOCKSIZE / 2) * REPLICATION, ds);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 43, Source: TestDiskspaceQuotaUpdate.java
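This test inspects quota accounting through internal NameNode structures (INodeDirectory, DirectoryWithQuotaFeature). For comparison, client code can observe the same namespace and storage-space consumption through the public API; a hedged sketch, with the path an assumption:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class QuotaCheckSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    ContentSummary cs = fs.getContentSummary(new Path("/foo"));
    // namespace consumption counts files plus directories
    System.out.println("namespace: "
        + (cs.getFileCount() + cs.getDirectoryCount()));
    // storage space consumed includes replication
    System.out.println("storage: " + cs.getSpaceConsumed());
  }
}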

Example 8: testAddBlockUC

import org.apache.hadoop.hdfs.DFSOutputStream; // import the required package/class
/**
 * Test adding new blocks without closing the corresponding file.
 */
@Test
public void testAddBlockUC() throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  final Path file1 = new Path("/file1");
  DFSTestUtil.createFile(fs, file1, BLOCKSIZE - 1, REPLICATION, 0L);
  
  FSDataOutputStream out = null;
  try {
    // append files without closing the streams
    out = fs.append(file1);
    String appendContent = "appending-content";
    out.writeBytes(appendContent);
    ((DFSOutputStream) out.getWrappedStream()).hsync(
        EnumSet.of(SyncFlag.UPDATE_LENGTH));
    
    // restart NN
    cluster.restartNameNode(true);
    FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
    
    INodeFile fileNode = fsdir.getINode4Write(file1.toString()).asFile();
    BlockInfoContiguous[] fileBlocks = fileNode.getBlocks();
    assertEquals(2, fileBlocks.length);
    assertEquals(BLOCKSIZE, fileBlocks[0].getNumBytes());
    assertEquals(BlockUCState.COMPLETE, fileBlocks[0].getBlockUCState());
    assertEquals(appendContent.length() - 1, fileBlocks[1].getNumBytes());
    assertEquals(BlockUCState.UNDER_CONSTRUCTION,
        fileBlocks[1].getBlockUCState());
  } finally {
    if (out != null) {
      out.close();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 37, Source: TestAddBlock.java

Example 9: getPipeLine

import org.apache.hadoop.hdfs.DFSOutputStream; // import the required package/class
/**
 * This method gets the pipeline for the current WAL.
 */
@VisibleForTesting
DatanodeInfo[] getPipeLine() {
  if (this.hdfs_out != null) {
    if (this.hdfs_out.getWrappedStream() instanceof DFSOutputStream) {
      return ((DFSOutputStream) this.hdfs_out.getWrappedStream()).getPipeline();
    }
  }
  return new DatanodeInfo[0];
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 13, Source: FSHLog.java
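A hedged sketch of inspecting the write pipeline from client code in the same way; it assumes getPipeline() is visible to the caller, as it is in the Hadoop version this HBase (ditb) code compiles against, and the path is an assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

public class PipelineSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    try (FSDataOutputStream out = fs.create(new Path("/tmp/pipeline-demo"))) {
      out.write(1);
      out.hflush(); // force a block and its pipeline to be allocated
      if (out.getWrappedStream() instanceof DFSOutputStream) {
        DatanodeInfo[] pipeline =
            ((DFSOutputStream) out.getWrappedStream()).getPipeline();
        if (pipeline != null) { // null until a pipeline is established
          for (DatanodeInfo dn : pipeline) {
            System.out.println("pipeline node: " + dn);
          }
        }
      }
    }
  }
}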

Example 10: HdfsDataOutputStream

import org.apache.hadoop.hdfs.DFSOutputStream; // import the required package/class
public HdfsDataOutputStream(CryptoOutputStream out,
    FileSystem.Statistics stats, long startPosition) throws IOException {
  super(out, stats, startPosition);
  Preconditions.checkArgument(
      out.getWrappedStream() instanceof DFSOutputStream,
      "CryptoOutputStream should wrap a DFSOutputStream");
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 8, Source: HdfsDataOutputStream.java

Example 11: hsync

import org.apache.hadoop.hdfs.DFSOutputStream; // import the required package/class
/**
 * Sync buffered data to the DataNodes (flush to disk devices).
 *
 * @param syncFlags
 *          Indicates the detailed semantics and actions of the hsync.
 * @throws IOException
 * @see FSDataOutputStream#hsync()
 */
public void hsync(EnumSet<SyncFlag> syncFlags) throws IOException {
  OutputStream wrappedStream = getWrappedStream();
  if (wrappedStream instanceof CryptoOutputStream) {
    wrappedStream.flush();
    wrappedStream = ((CryptoOutputStream) wrappedStream).getWrappedStream();
  }
  ((DFSOutputStream) wrappedStream).hsync(syncFlags);
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 17, Source: HdfsDataOutputStream.java

Example 12: put

import org.apache.hadoop.hdfs.DFSOutputStream; // import the required package/class
public synchronized void put(final long inodeId, final DFSOutputStream out,
    final DFSClient dfsc) {
  if (dfsc.isClientRunning()) {
    if (!isRunning() || isRenewerExpired()) {
      // start a new daemon with a new id.
      final int id = ++currentId;
      daemon = new Daemon(new Runnable() {
        @Override
        public void run() {
          try {
            if (LOG.isDebugEnabled()) {
              LOG.debug("Lease renewer daemon for " + clientsString()
                  + " with renew id " + id + " started");
            }
            LeaseRenewer.this.run(id);
          } catch(InterruptedException e) {
            LOG.debug("LeaseRenewer is interrupted.", e);
          } finally {
            synchronized(LeaseRenewer.this) {
              Factory.INSTANCE.remove(LeaseRenewer.this);
            }
            if (LOG.isDebugEnabled()) {
              LOG.debug("Lease renewer daemon for " + clientsString()
                  + " with renew id " + id + " exited");
            }
          }
        }

        @Override
        public String toString() {
          return String.valueOf(LeaseRenewer.this);
        }
      });
      daemon.start();
    }
    dfsc.putFileBeingWritten(inodeId, out);
    emptyTime = Long.MAX_VALUE;
  }
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 40, Source: LeaseRenewer.java

Example 13: testRenewal

import org.apache.hadoop.hdfs.DFSOutputStream; // import the required package/class
@Test
public void testRenewal() throws Exception {
  // Keep track of how many times the lease gets renewed
  final AtomicInteger leaseRenewalCount = new AtomicInteger();
  Mockito.doAnswer(new Answer<Boolean>() {
    @Override
    public Boolean answer(InvocationOnMock invocation) throws Throwable {
      leaseRenewalCount.incrementAndGet();
      return true;
    }
  }).when(MOCK_DFSCLIENT).renewLease();


  // Set up a file so that we start renewing our lease.
  DFSOutputStream mockStream = Mockito.mock(DFSOutputStream.class);
  long fileId = 123L;
  renewer.put(fileId, mockStream, MOCK_DFSCLIENT);

  // Wait for lease to get renewed
  long failTime = Time.monotonicNow() + 5000;
  while (Time.monotonicNow() < failTime &&
      leaseRenewalCount.get() == 0) {
    Thread.sleep(50);
  }
  if (leaseRenewalCount.get() == 0) {
    Assert.fail("Did not renew lease at all!");
  }

  renewer.closeFile(fileId, MOCK_DFSCLIENT);
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 31, Source: TestLeaseRenewer.java

Example 14: testThreadName

import org.apache.hadoop.hdfs.DFSOutputStream; // import the required package/class
@Test
public void testThreadName() throws Exception {
  DFSOutputStream mockStream = Mockito.mock(DFSOutputStream.class);
  long fileId = 789L;
  Assert.assertFalse("Renewer not initially running",
      renewer.isRunning());

  // Pretend to open a file
  renewer.put(fileId, mockStream, MOCK_DFSCLIENT);

  Assert.assertTrue("Renewer should have started running",
      renewer.isRunning());

  // Check the thread name is reasonable
  String threadName = renewer.getDaemonName();
  Assert.assertEquals("LeaseRenewer:myuser@hdfs://nn1/", threadName);

  // Pretend to close the file
  renewer.closeFile(fileId, MOCK_DFSCLIENT);
  renewer.setEmptyTime(Time.monotonicNow());

  // Should stop the renewer running within a few seconds
  long failTime = Time.monotonicNow() + 5000;
  while (renewer.isRunning() && Time.monotonicNow() < failTime) {
    Thread.sleep(50);
  }
  Assert.assertFalse(renewer.isRunning());
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 29, Source: TestLeaseRenewer.java

Example 15: testAddBlockUC

import org.apache.hadoop.hdfs.DFSOutputStream; // import the required package/class
/**
 * Test adding new blocks without closing the corresponding file.
 */
@Test
public void testAddBlockUC() throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  final Path file1 = new Path("/file1");
  DFSTestUtil.createFile(fs, file1, BLOCKSIZE - 1, REPLICATION, 0L);
  
  FSDataOutputStream out = null;
  try {
    // append files without closing the streams
    out = fs.append(file1);
    String appendContent = "appending-content";
    out.writeBytes(appendContent);
    ((DFSOutputStream) out.getWrappedStream()).hsync(
        EnumSet.of(SyncFlag.UPDATE_LENGTH));
    
    // restart NN
    cluster.restartNameNode(true);
    FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
    
    INodeFile fileNode = fsdir.getINode4Write(file1.toString()).asFile();
    BlockInfo[] fileBlocks = fileNode.getBlocks();
    assertEquals(2, fileBlocks.length);
    assertEquals(BLOCKSIZE, fileBlocks[0].getNumBytes());
    assertEquals(BlockUCState.COMPLETE, fileBlocks[0].getBlockUCState());
    assertEquals(appendContent.length() - 1, fileBlocks[1].getNumBytes());
    assertEquals(BlockUCState.UNDER_CONSTRUCTION,
        fileBlocks[1].getBlockUCState());
  } finally {
    if (out != null) {
      out.close();
    }
  }
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 37, Source: TestAddBlock.java


Note: The org.apache.hadoop.hdfs.DFSOutputStream class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by many developers; copyright in the source code belongs to the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.