

Java HdfsDataOutputStream Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.client.HdfsDataOutputStream. If you are wondering what the HdfsDataOutputStream class does, how to use it, or what real-world code that uses it looks like, the curated examples below should help.


The HdfsDataOutputStream class belongs to the org.apache.hadoop.hdfs.client package. The sections below present 15 code examples of the class, sorted by popularity by default.
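
Before the examples, here is a minimal usage sketch, not drawn from any of the projects below: it assumes the Hadoop client libraries are on the classpath, that fs.defaultFS points at a reachable HDFS cluster, and the path /tmp/hdfs-demo.txt is purely hypothetical. It illustrates the pattern most of the examples share: checking whether the stream returned by FileSystem.create() is an HdfsDataOutputStream and, if so, calling hsync with SyncFlag.UPDATE_LENGTH so the NameNode also records the new file length.

import java.nio.charset.StandardCharsets;
import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;

public class HdfsDataOutputStreamDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // fs.defaultFS must point at an HDFS cluster for the cast below to apply.
    try (FileSystem fs = FileSystem.get(conf);
         FSDataOutputStream out = fs.create(new Path("/tmp/hdfs-demo.txt"))) {
      out.write("hello hdfs".getBytes(StandardCharsets.UTF_8));
      if (out instanceof HdfsDataOutputStream) {
        // Flush to the DataNodes and also persist the new length on the NameNode.
        ((HdfsDataOutputStream) out).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
      } else {
        // Local or other file systems: fall back to a plain hsync.
        out.hsync();
      }
    }
  }
}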

Example 1: createWrappedOutputStream

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the required package/class
/**
 * Wraps the stream in a CryptoOutputStream if the underlying file is
 * encrypted.
 */
public HdfsDataOutputStream createWrappedOutputStream(DFSOutputStream dfsos,
    FileSystem.Statistics statistics, long startPos) throws IOException {
  final FileEncryptionInfo feInfo = dfsos.getFileEncryptionInfo();
  if (feInfo != null) {
    // File is encrypted, wrap the stream in a crypto stream.
    // Currently only one version, so no special logic based on the version #
    getCryptoProtocolVersion(feInfo);
    final CryptoCodec codec = getCryptoCodec(conf, feInfo);
    KeyVersion decrypted = decryptEncryptedDataEncryptionKey(feInfo);
    final CryptoOutputStream cryptoOut =
        new CryptoOutputStream(dfsos, codec,
            decrypted.getMaterial(), feInfo.getIV(), startPos);
    return new HdfsDataOutputStream(cryptoOut, statistics, startPos);
  } else {
    // No FileEncryptionInfo present so no encryption.
    return new HdfsDataOutputStream(dfsos, statistics, startPos);
  }
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: DFSClient.java

Example 2: prepare

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the required package/class
@Override
void prepare() throws Exception {
  final Path filePath = new Path(file);
  DFSTestUtil.createFile(dfs, filePath, BlockSize, DataNodes, 0);
  // append to the file and leave the last block under construction
  out = this.client.append(file, BlockSize, EnumSet.of(CreateFlag.APPEND),
      null, null);
  byte[] appendContent = new byte[100];
  new Random().nextBytes(appendContent);
  out.write(appendContent);
  ((HdfsDataOutputStream) out).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  
  LocatedBlocks blks = dfs.getClient()
      .getLocatedBlocks(file, BlockSize + 1);
  assertEquals(1, blks.getLocatedBlocks().size());
  nodes = blks.get(0).getLocations();
  oldBlock = blks.get(0).getBlock();
  
  LocatedBlock newLbk = client.getNamenode().updateBlockForPipeline(
      oldBlock, client.getClientName());
  newBlock = new ExtendedBlock(oldBlock.getBlockPoolId(),
      oldBlock.getBlockId(), oldBlock.getNumBytes(), 
      newLbk.getBlock().getGenerationStamp());
}
 
Developer: naver, Project: hadoop, Lines: 25, Source: TestRetryCacheWithHA.java

Example 3: testLease

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the required package/class
@Test
public void testLease() throws Exception {
  try {
    NameNodeAdapter.setLeasePeriod(fsn, 100, 200);
    final Path foo = new Path(dir, "foo");
    final Path bar = new Path(foo, "bar");
    DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0);
    HdfsDataOutputStream out = appendFileWithoutClosing(bar, 100);
    out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");

    hdfs.delete(foo, true);
    Thread.sleep(1000);
    try {
      fsn.writeLock();
      NameNodeAdapter.getLeaseManager(fsn).runLeaseChecks();
    } finally {
      fsn.writeUnlock();
    }
  } finally {
    NameNodeAdapter.setLeasePeriod(fsn, HdfsConstants.LEASE_SOFTLIMIT_PERIOD,
        HdfsConstants.LEASE_HARDLIMIT_PERIOD);
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 25, Source: TestINodeFileUnderConstructionWithSnapshot.java

Example 4: hsyncWithSizeUpdate

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the required package/class
public void hsyncWithSizeUpdate() throws IOException {
  if (out != null) {
    if (out instanceof HdfsDataOutputStream) {
      try {
        ((HdfsDataOutputStream) out).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
      } catch (NoSuchMethodError e) {
        // We are probably working with an older version of hadoop jars which does not
        // have the hsync function with SyncFlag. Use the hsync version that does not
        // update the size.
        out.hsync();
      }
    } else {
      out.hsync();
    }
  }
}
 
Developer: gemxd, Project: gemfirexd-oss, Lines: 17, Source: SequenceFile.java

Example 5: prepare

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the required package/class
@Override
void prepare() throws Exception {
  final Path filePath = new Path(file);
  DFSTestUtil.createFile(dfs, filePath, BlockSize, DataNodes, 0);
  // append to the file and leave the last block under construction
  out = this.client.append(file, BlockSize, null, null);
  byte[] appendContent = new byte[100];
  new Random().nextBytes(appendContent);
  out.write(appendContent);
  ((HdfsDataOutputStream) out).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  
  LocatedBlocks blks = dfs.getClient()
      .getLocatedBlocks(file, BlockSize + 1);
  assertEquals(1, blks.getLocatedBlocks().size());
  nodes = blks.get(0).getLocations();
  oldBlock = blks.get(0).getBlock();
  
  LocatedBlock newLbk = client.getNamenode().updateBlockForPipeline(
      oldBlock, client.getClientName());
  newBlock = new ExtendedBlock(oldBlock.getBlockPoolId(),
      oldBlock.getBlockId(), oldBlock.getNumBytes(), 
      newLbk.getBlock().getGenerationStamp());
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 24, Source: TestRetryCacheWithHA.java

Example 6: testNoLogEntryBeforeClosing

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the required package/class
@Test
public void testNoLogEntryBeforeClosing() throws Exception {
  Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(2)
      .build();
  try {
    DistributedFileSystem dfs = cluster.getFileSystem();
    Path projects = new Path("/projects");
    Path project = new Path(projects, "project");
    final Path dataset = new Path(project, "dataset");
    Path file = new Path(dataset, "file");
    dfs.mkdirs(dataset, FsPermission.getDefault());
    dfs.setMetaEnabled(dataset, true);
    HdfsDataOutputStream out = TestFileCreation.create(dfs, file, 1);
    assertFalse(checkLog(TestUtil.getINodeId(cluster.getNameNode(), file),
        MetadataLogEntry.Operation.ADD));
    out.close();
    assertTrue(checkLog(TestUtil.getINodeId(cluster.getNameNode(), file),
        MetadataLogEntry.Operation.ADD));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: hopshadoop, Project: hops, Lines: 27, Source: TestMetadataLog.java

Example 7: OpenFileCtx

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the required package/class
OpenFileCtx(HdfsDataOutputStream fos, Nfs3FileAttributes latestAttr,
    String dumpFilePath) {
  this.fos = fos;
  this.latestAttr = latestAttr;
  // We use the ReverseComparatorOnMin as the comparator of the map. In this
  // way, we first dump the data with larger offset. In the meanwhile, we
  // retrieve the last element to write back to HDFS.
  pendingWrites = new ConcurrentSkipListMap<OffsetRange, WriteCtx>(
      OffsetRange.ReverseComparatorOnMin);
  updateLastAccessTime();
  activeState = true;
  asyncStatus = false;
  dumpOut = null;
  raf = null;
  nonSequentialWriteInMemory = new AtomicLong(0);

  this.dumpFilePath = dumpFilePath;
  enabledDump = dumpFilePath != null;
  nextOffset = new AtomicLong();
  nextOffset.set(latestAttr.getSize());
  try {
    assert(nextOffset.get() == this.fos.getPos());
  } catch (IOException e) {
    // getPos() failures are ignored here; the assert is only a sanity check.
  }
  dumpThread = null;
}
 
Developer: chendave, Project: hadoop-TCP, Lines: 26, Source: OpenFileCtx.java

Example 8: execute

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the required package/class
@Override
public void execute(List<TridentTuple> tuples) throws IOException {
    boolean rotated = false;
    synchronized (this.writeLock) {
        for (TridentTuple tuple : tuples) {
            byte[] bytes = this.format.format(tuple);
            out.write(bytes);
            this.offset += bytes.length;

            if (this.rotationPolicy.mark(tuple, this.offset)) {
                rotateOutputFile();
                this.offset = 0;
                this.rotationPolicy.reset();
                rotated = true;
            }
        }
        if (!rotated) {
            if (this.out instanceof HdfsDataOutputStream) {
                ((HdfsDataOutputStream) this.out).hsync(EnumSet.of(HdfsDataOutputStream.SyncFlag.UPDATE_LENGTH));
            } else {
                this.out.hsync();
            }
        }
    }
}
 
Developer: ptgoetz, Project: storm-hdfs, Lines: 26, Source: HdfsState.java

Example 9: OpenFileCtx

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the required package/class
OpenFileCtx(HdfsDataOutputStream fos, Nfs3FileAttributes latestAttr,
    String dumpFilePath, DFSClient client, IdMappingServiceProvider iug,
    boolean aixCompatMode, NfsConfiguration config) {
  this.fos = fos;
  this.latestAttr = latestAttr;
  this.aixCompatMode = aixCompatMode;
  // We use the ReverseComparatorOnMin as the comparator of the map. In this
  // way, we first dump the data with larger offset. In the meanwhile, we
  // retrieve the last element to write back to HDFS.
  pendingWrites = new ConcurrentSkipListMap<OffsetRange, WriteCtx>(
      OffsetRange.ReverseComparatorOnMin);
  
  pendingCommits = new ConcurrentSkipListMap<Long, CommitCtx>();
  
  updateLastAccessTime();
  activeState = true;
  asyncStatus = false;
  asyncWriteBackStartOffset = 0;
  dumpOut = null;
  raf = null;
  nonSequentialWriteInMemory = new AtomicLong(0);

  this.dumpFilePath = dumpFilePath;  
  enabledDump = dumpFilePath != null;
  nextOffset = new AtomicLong();
  nextOffset.set(latestAttr.getSize());
  try {
    assert(nextOffset.get() == this.fos.getPos());
  } catch (IOException e) {
    // getPos() failures are ignored here; the assert is only a sanity check.
  }
  dumpThread = null;
  this.client = client;
  this.iug = iug;
  this.uploadLargeFile = config.getBoolean(NfsConfigKeys.LARGE_FILE_UPLOAD,
      NfsConfigKeys.LARGE_FILE_UPLOAD_DEFAULT);
}
 
Developer: naver, Project: hadoop, Lines: 36, Source: OpenFileCtx.java

Example 10: writeData

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the required package/class
public void writeData(HdfsDataOutputStream fos) throws IOException {
  Preconditions.checkState(fos != null);

  ByteBuffer dataBuffer;
  try {
    dataBuffer = getData();
  } catch (Exception e1) {
    LOG.error("Failed to get request data offset:" + offset + " count:"
        + count + " error:" + e1);
    throw new IOException("Can't get WriteCtx.data");
  }

  byte[] data = dataBuffer.array();
  int position = dataBuffer.position();
  int limit = dataBuffer.limit();
  Preconditions.checkState(limit - position == count);
  // Modified write has a valid original count
  if (position != 0) {
    if (limit != getOriginalCount()) {
      throw new IOException("Modified write has differnt original size."
          + "buff position:" + position + " buff limit:" + limit + ". "
          + toString());
    }
  }
  
  // Now write data
  fos.write(data, position, count);
}
 
Developer: naver, Project: hadoop, Lines: 29, Source: WriteCtx.java

Example 11: testCheckCommitAixCompatMode

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the required package/class
@Test
public void testCheckCommitAixCompatMode() throws IOException {
  DFSClient dfsClient = Mockito.mock(DFSClient.class);
  Nfs3FileAttributes attr = new Nfs3FileAttributes();
  HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class);

  NfsConfiguration conf = new NfsConfiguration();
  conf.setBoolean(NfsConfigKeys.LARGE_FILE_UPLOAD, false);
  // Enable AIX compatibility mode.
  OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient,
      new ShellBasedIdMapping(new NfsConfiguration()), true, conf);
  
  // Test fall-through to pendingWrites check in the event that commitOffset
  // is greater than the number of bytes we've so far flushed.
  Mockito.when(fos.getPos()).thenReturn((long) 2);
  COMMIT_STATUS status = ctx.checkCommitInternal(5, null, 1, attr, false);
  Assert.assertTrue(status == COMMIT_STATUS.COMMIT_FINISHED);
  
  // Test the case when we actually have received more bytes than we're trying
  // to commit.
  ctx.getPendingWritesForTest().put(new OffsetRange(0, 10),
      new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
  Mockito.when(fos.getPos()).thenReturn((long) 10);
  ctx.setNextOffsetForTest((long)10);
  status = ctx.checkCommitInternal(5, null, 1, attr, false);
  Assert.assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC);
}
 
Developer: naver, Project: hadoop, Lines: 28, Source: TestWrites.java

Example 12: testCheckSequential

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the required package/class
@Test
public void testCheckSequential() throws IOException {
  DFSClient dfsClient = Mockito.mock(DFSClient.class);
  Nfs3FileAttributes attr = new Nfs3FileAttributes();
  HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class);
  Mockito.when(fos.getPos()).thenReturn((long) 0);
  NfsConfiguration config = new NfsConfiguration();

  config.setBoolean(NfsConfigKeys.LARGE_FILE_UPLOAD, false);
  OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient,
      new ShellBasedIdMapping(config), false, config);
  
  ctx.getPendingWritesForTest().put(new OffsetRange(5, 10),
      new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
  ctx.getPendingWritesForTest().put(new OffsetRange(10, 15),
      new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
  ctx.getPendingWritesForTest().put(new OffsetRange(20, 25),
      new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));

  assertTrue(!ctx.checkSequential(5, 4));
  assertTrue(ctx.checkSequential(9, 5));
  assertTrue(ctx.checkSequential(10, 5));
  assertTrue(ctx.checkSequential(14, 5));
  assertTrue(!ctx.checkSequential(15, 5));
  assertTrue(!ctx.checkSequential(20, 5));
  assertTrue(!ctx.checkSequential(25, 5));
  assertTrue(!ctx.checkSequential(999, 5));
}
 
Developer: naver, Project: hadoop, Lines: 29, Source: TestWrites.java

Example 13: createInternal

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the required package/class
@Override
public HdfsDataOutputStream createInternal(Path f,
    EnumSet<CreateFlag> createFlag, FsPermission absolutePermission,
    int bufferSize, short replication, long blockSize, Progressable progress,
    ChecksumOpt checksumOpt, boolean createParent) throws IOException {

  final DFSOutputStream dfsos = dfs.primitiveCreate(getUriPath(f),
    absolutePermission, createFlag, createParent, replication, blockSize,
    progress, bufferSize, checksumOpt);
  return dfs.createWrappedOutputStream(dfsos, statistics,
      dfsos.getInitialLen());
}
 
Developer: naver, Project: hadoop, Lines: 13, Source: Hdfs.java

Example 14: primitiveCreate

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the required package/class
@Override
protected HdfsDataOutputStream primitiveCreate(Path f,
  FsPermission absolutePermission, EnumSet<CreateFlag> flag, int bufferSize,
  short replication, long blockSize, Progressable progress,
  ChecksumOpt checksumOpt) throws IOException {
  statistics.incrementWriteOps(1);
  final DFSOutputStream dfsos = dfs.primitiveCreate(
    getPathName(fixRelativePart(f)),
    absolutePermission, flag, true, replication, blockSize,
    progress, bufferSize, checksumOpt);
  return dfs.createWrappedOutputStream(dfsos, statistics);
}
 
Developer: naver, Project: hadoop, Lines: 13, Source: DistributedFileSystem.java

Example 15: SlowWriter

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the required package/class
SlowWriter(DistributedFileSystem fs, Path filepath, final long sleepms
    ) throws IOException {
  super(SlowWriter.class.getSimpleName() + ":" + filepath);
  this.filepath = filepath;
  this.out = (HdfsDataOutputStream)fs.create(filepath, REPLICATION);
  this.sleepms = sleepms;
}
 
Developer: naver, Project: hadoop, Lines: 8, Source: TestReplaceDatanodeOnFailure.java


Note: the org.apache.hadoop.hdfs.client.HdfsDataOutputStream examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective authors; copyright remains with the original authors, and any redistribution or use should follow the license of the corresponding project. Do not reproduce without permission.