This page collects typical usage examples of the Java method org.apache.hadoop.hdfs.client.HdfsDataOutputStream.write. If you are wondering what HdfsDataOutputStream.write does, how to call it, or what real-world uses look like, the curated examples below may help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hdfs.client.HdfsDataOutputStream.
Eight code examples of HdfsDataOutputStream.write are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the site recommend better Java code examples.
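Before the examples, here is a minimal, self-contained sketch of the method's basic use. It is not taken from the examples below and makes a few assumptions: an HDFS cluster is reachable through fs.defaultFS, and the class name and the path /tmp/demo.bin are placeholders.

import java.nio.charset.StandardCharsets;
import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;

public class HdfsWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();   // assumes fs.defaultFS points at an HDFS cluster
    Path path = new Path("/tmp/demo.bin");      // hypothetical target path
    DistributedFileSystem dfs = (DistributedFileSystem) path.getFileSystem(conf);

    byte[] payload = "hello hdfs".getBytes(StandardCharsets.UTF_8);
    // On HDFS, create() returns an HdfsDataOutputStream at runtime, so the cast is safe
    try (HdfsDataOutputStream out = (HdfsDataOutputStream) dfs.create(path, true)) {
      out.write(payload, 0, payload.length);
      // flush to the datanodes and persist the new length on the NameNode
      out.hsync(EnumSet.of(HdfsDataOutputStream.SyncFlag.UPDATE_LENGTH));
    }
  }
}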
Example 1: writeData
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the package/class the method depends on
// Write the buffered request data to the output stream, honoring the buffer's
// position and limit.
public void writeData(HdfsDataOutputStream fos) throws IOException {
  Preconditions.checkState(fos != null);
  ByteBuffer dataBuffer;
  try {
    dataBuffer = getData();
  } catch (Exception e1) {
    LOG.error("Failed to get request data offset:" + offset + " count:"
        + count + " error:" + e1);
    throw new IOException("Can't get WriteCtx.data");
  }

  byte[] data = dataBuffer.array();
  int position = dataBuffer.position();
  int limit = dataBuffer.limit();
  Preconditions.checkState(limit - position == count);
  // Modified write has a valid original count
  if (position != 0) {
    if (limit != getOriginalCount()) {
      throw new IOException("Modified write has different original size. "
          + "buff position:" + position + " buff limit:" + limit + ". "
          + toString());
    }
  }

  // Now write data
  fos.write(data, position, count);
}
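Example 1 hands the buffer's backing array, its position, and the remaining count to write(byte[], off, len). Below is a small sketch of that mapping outside the original WriteCtx class; the class and method names are made up, and a heap ByteBuffer with an accessible backing array is assumed.

import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;

class ByteBufferWriteSketch {
  // Writes only the readable slice of a heap ByteBuffer, i.e. the bytes between
  // position() and limit(), mirroring the array/position/count mapping above.
  static void writeRemaining(HdfsDataOutputStream fos, ByteBuffer dataBuffer)
      throws IOException {
    int position = dataBuffer.position();            // first byte still to be written
    int count = dataBuffer.limit() - position;       // number of readable bytes
    fos.write(dataBuffer.array(), position, count);  // backing array + offset + length
  }
}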
Example 2: appendFileWithoutClosing
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the package/class the method depends on
/** Append a file without closing the output stream */
private HdfsDataOutputStream appendFileWithoutClosing(Path file, int length)
    throws IOException {
  byte[] toAppend = new byte[length];
  Random random = new Random();
  random.nextBytes(toAppend);
  HdfsDataOutputStream out = (HdfsDataOutputStream) hdfs.append(file);
  out.write(toAppend);
  return out;
}
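A hypothetical caller for the helper above (not part of the original test): it appends without closing, then hsyncs with UPDATE_LENGTH so the new length becomes visible while the writer stays open. The path is a placeholder, and the surrounding test class with its hdfs field is assumed.

Path file = new Path("/snapshot-test/file1");               // placeholder path
HdfsDataOutputStream out = appendFileWithoutClosing(file, 1024);
// make the appended bytes and the new length visible without closing the stream
out.hsync(EnumSet.of(HdfsDataOutputStream.SyncFlag.UPDATE_LENGTH));
// ... take a snapshot or re-read the file here while the writer is still open ...
out.close();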
Example 3: appendFileWithoutClosing
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the package/class the method depends on
private HdfsDataOutputStream appendFileWithoutClosing(Path file, int length)
    throws IOException {
  byte[] toAppend = new byte[length];
  Random random = new Random();
  random.nextBytes(toAppend);
  HdfsDataOutputStream out = (HdfsDataOutputStream) hdfs.append(file);
  out.write(toAppend);
  return out;
}
Example 4: modify
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the package/class the method depends on
@Override
void modify() throws Exception {
  assertTrue(fs.exists(file));
  byte[] toAppend = new byte[appendLen];
  random.nextBytes(toAppend);

  out = (HdfsDataOutputStream) fs.append(file);
  out.write(toAppend);
  out.hsync(EnumSet.of(HdfsDataOutputStream.SyncFlag.UPDATE_LENGTH));
}
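The UPDATE_LENGTH flag is what makes the appended bytes count toward the file length before the stream is closed. A hedged sketch of that effect, reusing the same fs, file, and out fields assumed by Example 4:

long before = fs.getFileStatus(file).getLen();   // length before the append
out = (HdfsDataOutputStream) fs.append(file);
out.write(new byte[128]);
// a plain hsync() would persist the data but leave the NameNode's recorded length
// stale; UPDATE_LENGTH refreshes it, so getFileStatus() reflects the extra bytes
out.hsync(EnumSet.of(HdfsDataOutputStream.SyncFlag.UPDATE_LENGTH));
long after = fs.getFileStatus(file).getLen();    // before + 128
out.close();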
Example 5: writeData
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the package/class the method depends on
public void writeData(HdfsDataOutputStream fos) throws IOException {
  Preconditions.checkState(fos != null);
  ByteBuffer dataBuffer;
  try {
    dataBuffer = getData();
  } catch (Exception e1) {
    LOG.error("Failed to get request data offset:" + getPlainOffset() + " " +
        "count:" + count + " error:" + e1);
    throw new IOException("Can't get WriteCtx.data");
  }

  byte[] data = dataBuffer.array();
  int position = dataBuffer.position();
  int limit = dataBuffer.limit();
  Preconditions.checkState(limit - position == count);
  // Modified write has a valid original count
  if (position != 0) {
    if (limit != getOriginalCount()) {
      throw new IOException("Modified write has different original size. "
          + "buff position:" + position + " buff limit:" + limit + ". "
          + toString());
    }
  }

  // Now write data
  fos.write(data, position, count);
}
Example 6: testBlockSending
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the package/class the method depends on
@Test
public void testBlockSending() throws IOException {
  DistributedFileSystem dfs = (DistributedFileSystem) getFileSystem();
  TestDfsClient testDfsClient = new TestDfsClient(getConfig());
  testDfsClient.injectIntoDfs(dfs);

  Util.createRandomFile(dfs, testFile, seed, TEST_BLOCK_COUNT,
      DFS_TEST_BLOCK_SIZE);
  FileStatus status = dfs.getFileStatus(testFile);
  LocatedBlock lb = dfs.getClient()
      .getLocatedBlocks(status.getPath().toUri().getPath(), 0, Long.MAX_VALUE)
      .get(0);
  DataNodeUtil.loseBlock(getCluster(), lb);
  List<LocatedBlock> lostBlocks = new ArrayList<LocatedBlock>();
  lostBlocks.add(lb);
  LocatedBlocks locatedBlocks =
      new LocatedBlocks(0, false, lostBlocks, null, true);
  testDfsClient.setMissingLocatedBlocks(locatedBlocks);
  LOG.info("Losing block " + lb.toString());

  HdfsDataOutputStream out = dfs.sendBlock(status.getPath(), lb, null, null);
  out.write(Util.randomBytes(seed,
      conf.getInt(DFS_BLOCK_SIZE_KEY, DFS_TEST_BLOCK_SIZE)), 0,
      DFS_TEST_BLOCK_SIZE);
  out.close();

  ExtendedBlock extendedBlock = new ExtendedBlock(lb.getBlock());
  extendedBlock.setBlockId(lb.getBlock().getBlockId());
  int number = getCluster().getAllBlockFiles(extendedBlock).length;
  Assert.assertEquals(conf.getInt(DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT),
      number);
}
Example 7: writeData
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the package/class the method depends on
public void writeData(HdfsDataOutputStream fos) throws IOException {
  Preconditions.checkState(fos != null);
  ByteBuffer dataBuffer = null;
  try {
    dataBuffer = getData();
  } catch (Exception e1) {
    LOG.error(
        "Failed to get request data offset:" + offset + " count:" + count +
        " error:" + e1);
    throw new IOException("Can't get WriteCtx.data");
  }

  byte[] data = dataBuffer.array();
  int position = dataBuffer.position();
  int limit = dataBuffer.limit();
  Preconditions.checkState(limit - position == count);
  // Modified write has a valid original count
  if (position != 0) {
    if (limit != getOriginalCount()) {
      throw new IOException(
          "Modified write has different original size. " + "buff position:" +
          position + " buff limit:" + limit + ". " + toString());
    }
  }

  // Now write data
  fos.write(data, position, count);
}
Example 8: writeData
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the package/class the method depends on
public void writeData(HdfsDataOutputStream fos) throws IOException {
  Preconditions.checkState(fos != null);
  ByteBuffer dataBuffer = null;
  try {
    dataBuffer = getData();
  } catch (Exception e1) {
    LOG.error("Failed to get request data offset:" + offset + " count:"
        + count + " error:" + e1);
    throw new IOException("Can't get WriteCtx.data");
  }

  byte[] data = dataBuffer.array();
  int position = dataBuffer.position();
  int limit = dataBuffer.limit();
  Preconditions.checkState(limit - position == count);
  // Modified write has a valid original count
  if (position != 0) {
    if (limit != getOriginalCount()) {
      throw new IOException("Modified write has different original size. "
          + "buff position:" + position + " buff limit:" + limit + ". "
          + toString());
    }
  }

  // Now write data
  fos.write(data, position, count);
}