

Java HdfsDataOutputStream.write Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.client.HdfsDataOutputStream.write, drawn from open-source projects. If you are wondering what HdfsDataOutputStream.write does, how to call it, or where it is used in practice, the curated examples below should help. You can also explore further usage of the enclosing class, org.apache.hadoop.hdfs.client.HdfsDataOutputStream.


The sections below present 8 code examples of the HdfsDataOutputStream.write method, ordered by popularity by default.
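Before the extracted examples, a minimal self-contained sketch of the common write(byte[], int, int) call pattern may help. This sketch is not taken from any of the projects below; the file path, the UTF-8 payload, and the assumption that fs.defaultFS points at an HDFS cluster are all illustrative.

import java.io.IOException;
import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;

public class HdfsWriteSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Assumes the default FileSystem is HDFS, so create() returns an
    // HdfsDataOutputStream at runtime; this is the same cast the
    // examples below apply to append().
    FileSystem fs = FileSystem.get(conf);
    HdfsDataOutputStream out =
        (HdfsDataOutputStream) fs.create(new Path("/tmp/sketch.txt"));
    try {
      byte[] data = "hello hdfs".getBytes("UTF-8");
      // Write data.length bytes starting at offset 0 of the buffer.
      out.write(data, 0, data.length);
      // Flush to the datanodes and update the file length visible on
      // the NameNode (compare Example 4 below).
      out.hsync(EnumSet.of(HdfsDataOutputStream.SyncFlag.UPDATE_LENGTH));
    } finally {
      out.close();
    }
  }
}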

Example 1: writeData

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the package/class this method depends on
public void writeData(HdfsDataOutputStream fos) throws IOException {
  Preconditions.checkState(fos != null);

  ByteBuffer dataBuffer;
  try {
    dataBuffer = getData();
  } catch (Exception e1) {
    LOG.error("Failed to get request data offset:" + offset + " count:"
        + count + " error:" + e1);
    throw new IOException("Can't get WriteCtx.data");
  }

  byte[] data = dataBuffer.array();
  int position = dataBuffer.position();
  int limit = dataBuffer.limit();
  Preconditions.checkState(limit - position == count);
  // Modified write has a valid original count
  if (position != 0) {
    if (limit != getOriginalCount()) {
      throw new IOException("Modified write has differnt original size."
          + "buff position:" + position + " buff limit:" + limit + ". "
          + toString());
    }
  }
  
  // Now write data
  fos.write(data, position, count);
}
 
Developer ID: naver, Project: hadoop, Lines: 29, Source: WriteCtx.java

Example 2: appendFileWithoutClosing

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the package/class this method depends on
/** Append a file without closing the output stream */
private HdfsDataOutputStream appendFileWithoutClosing(Path file, int length)
    throws IOException {
  byte[] toAppend = new byte[length];
  Random random = new Random();
  random.nextBytes(toAppend);
  HdfsDataOutputStream out = (HdfsDataOutputStream) hdfs.append(file);
  out.write(toAppend);
  return out;
}
 
Developer ID: naver, Project: hadoop, Lines: 11, Source: TestFSImageWithSnapshot.java

Example 3: appendFileWithoutClosing

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the package/class this method depends on
private HdfsDataOutputStream appendFileWithoutClosing(Path file, int length)
    throws IOException {
  byte[] toAppend = new byte[length];
  Random random = new Random();
  random.nextBytes(toAppend);
  HdfsDataOutputStream out = (HdfsDataOutputStream) hdfs.append(file);
  out.write(toAppend);
  return out;
}
 
Developer ID: naver, Project: hadoop, Lines: 10, Source: TestINodeFileUnderConstructionWithSnapshot.java

Example 4: modify

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the package/class this method depends on
@Override
void modify() throws Exception {
  assertTrue(fs.exists(file));
  byte[] toAppend = new byte[appendLen];
  random.nextBytes(toAppend);

  out = (HdfsDataOutputStream)fs.append(file);
  out.write(toAppend);
  out.hsync(EnumSet.of(HdfsDataOutputStream.SyncFlag.UPDATE_LENGTH));
}
 
Developer ID: naver, Project: hadoop, Lines: 11, Source: TestSnapshot.java

Example 5: writeData

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the package/class this method depends on
public void writeData(HdfsDataOutputStream fos) throws IOException {
  Preconditions.checkState(fos != null);

  ByteBuffer dataBuffer;
  try {
    dataBuffer = getData();
  } catch (Exception e1) {
    LOG.error("Failed to get request data offset:" + getPlainOffset() + " " +
        "count:" + count + " error:" + e1);
    throw new IOException("Can't get WriteCtx.data");
  }

  byte[] data = dataBuffer.array();
  int position = dataBuffer.position();
  int limit = dataBuffer.limit();
  Preconditions.checkState(limit - position == count);
  // Modified write has a valid original count
  if (position != 0) {
    if (limit != getOriginalCount()) {
      throw new IOException("Modified write has differnt original size."
          + "buff position:" + position + " buff limit:" + limit + ". "
          + toString());
    }
  }
  
  // Now write data
  fos.write(data, position, count);
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 29, Source: WriteCtx.java

Example 6: testBlockSending

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the package/class this method depends on
@Test
public void testBlockSending() throws IOException {
  DistributedFileSystem dfs = (DistributedFileSystem) getFileSystem();
  TestDfsClient testDfsClient = new TestDfsClient(getConfig());
  testDfsClient.injectIntoDfs(dfs);
  Util.createRandomFile(dfs, testFile, seed, TEST_BLOCK_COUNT,
      DFS_TEST_BLOCK_SIZE);

  FileStatus status = dfs.getFileStatus(testFile);
  LocatedBlock lb = dfs.getClient()
      .getLocatedBlocks(status.getPath().toUri().getPath(), 0, Long.MAX_VALUE)
      .get(0);
  DataNodeUtil.loseBlock(getCluster(), lb);
  List<LocatedBlock> lostBlocks = new ArrayList<LocatedBlock>();
  lostBlocks.add(lb);
  LocatedBlocks locatedBlocks =
      new LocatedBlocks(0, false, lostBlocks, null, true);
  testDfsClient.setMissingLocatedBlocks(locatedBlocks);
  LOG.info("Losing block " + lb.toString());

  HdfsDataOutputStream out = dfs.sendBlock(status.getPath(), lb, null, null);
  out.write(Util.randomBytes(seed,
      conf.getInt(DFS_BLOCK_SIZE_KEY, DFS_TEST_BLOCK_SIZE)), 0,
      DFS_TEST_BLOCK_SIZE);
  out.close();
  ExtendedBlock extendedBlock = new ExtendedBlock(lb.getBlock());
  extendedBlock.setBlockId(lb.getBlock().getBlockId());
  int number = getCluster().getAllBlockFiles(extendedBlock).length;
  Assert.assertEquals(conf.getInt(DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT),
      number);
}
 
Developer ID: hopshadoop, Project: hops, Lines: 32, Source: TestBlockSending.java

Example 7: writeData

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the package/class this method depends on
public void writeData(HdfsDataOutputStream fos) throws IOException {
  Preconditions.checkState(fos != null);

  ByteBuffer dataBuffer = null;
  try {
    dataBuffer = getData();
  } catch (Exception e1) {
    LOG.error(
        "Failed to get request data offset:" + offset + " count:" + count +
            " error:" + e1);
    throw new IOException("Can't get WriteCtx.data");
  }

  byte[] data = dataBuffer.array();
  int position = dataBuffer.position();
  int limit = dataBuffer.limit();
  Preconditions.checkState(limit - position == count);
  // Modified write has a valid original count
  if (position != 0) {
    if (limit != getOriginalCount()) {
      throw new IOException(
          "Modified write has different original size. " + "buff position:" +
              position + " buff limit:" + limit + ". " + toString());
    }
  }
  
  // Now write data
  fos.write(data, position, count);
}
 
Developer ID: hopshadoop, Project: hops, Lines: 30, Source: WriteCtx.java

Example 8: writeData

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the package/class this method depends on
public void writeData(HdfsDataOutputStream fos) throws IOException {
  Preconditions.checkState(fos != null);

  ByteBuffer dataBuffer = null;
  try {
    dataBuffer = getData();
  } catch (Exception e1) {
    LOG.error("Failed to get request data offset:" + offset + " count:"
        + count + " error:" + e1);
    throw new IOException("Can't get WriteCtx.data");
  }

  byte[] data = dataBuffer.array();
  int position = dataBuffer.position();
  int limit = dataBuffer.limit();
  Preconditions.checkState(limit - position == count);
  // Modified write has a valid original count
  if (position != 0) {
    if (limit != getOriginalCount()) {
      throw new IOException("Modified write has differnt original size."
          + "buff position:" + position + " buff limit:" + limit + ". "
          + toString());
    }
  }
  
  // Now write data
  fos.write(data, position, count);
}
 
Developer ID: chendave, Project: hadoop-TCP, Lines: 29, Source: WriteCtx.java


Note: The org.apache.hadoop.hdfs.client.HdfsDataOutputStream.write examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects; copyright remains with the original authors, and any distribution or use should follow the corresponding project's License. Do not reproduce without permission.