This article collects typical usage examples of the Java method org.apache.hadoop.fs.FSDataOutputStream.getWrappedStream. If you are unsure what FSDataOutputStream.getWrappedStream does or how to use it in practice, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.fs.FSDataOutputStream.
Six code examples of FSDataOutputStream.getWrappedStream are shown below, sorted by popularity.
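Before the examples, here is a minimal sketch of the common pattern (a sketch only: the file path is illustrative, and the downcast assumes an HDFS-backed FileSystem). FSDataOutputStream is a decorator, and getWrappedStream() exposes the filesystem-specific stream it wraps, which callers downcast to reach implementation-specific hooks.

import java.io.OutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSOutputStream;

public class WrappedStreamSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // The file path below is an arbitrary example.
    FSDataOutputStream out = fs.create(new Path("/tmp/example.txt"));
    // getWrappedStream() returns the raw OutputStream that the
    // FSDataOutputStream decorates; its concrete type depends on the
    // backing FileSystem, so guard the cast with instanceof.
    OutputStream wrapped = out.getWrappedStream();
    if (wrapped instanceof DFSOutputStream) {
      DFSOutputStream dfsOut = (DFSOutputStream) wrapped;
      // HDFS-specific calls on dfsOut would go here.
    }
    out.close();
  }
}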
Example 1: run
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
@Override
public void run() {
  System.out.println("Workload starting ");
  for (int i = 0; i < numberOfFiles; i++) {
    Path filename = new Path(id + "." + i);
    try {
      System.out.println("Workload processing file " + filename);
      FSDataOutputStream stm = createFile(fs, filename, replication);
      DFSOutputStream dfstream = (DFSOutputStream) (stm.getWrappedStream());
      dfstream.setArtificialSlowdown(1000);
      writeFile(stm, myseed);
      stm.close();
      checkFile(fs, filename, replication, numBlocks, fileSize, myseed);
    } catch (Throwable e) {
      System.out.println("Workload exception " + e);
      assertTrue(e.toString(), false);
    }
    // increment the stamp to indicate that another file is done.
    synchronized (this) {
      stamp++;
    }
  }
}
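Note: setArtificialSlowdown is a test-only hook on DFSOutputStream that delays the client's data streamer between packets, which is why the example first downcasts the wrapped stream; production code should not depend on it.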
Example 2: assertPartitionsWritten
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
/**
 * Assert that the no. of partitions written matches expectations
 * @param action operation (for use in the assertions)
 * @param out output stream
 * @param expected expected no. of partitions
 */
protected void assertPartitionsWritten(String action, FSDataOutputStream out,
    long expected) {
  OutputStream nativeStream = out.getWrappedStream();
  int written = getPartitionsWritten(out);
  if (written != expected) {
    Assert.fail(action + ": " +
        TestSwiftFileSystemPartitionedUploads.WRONG_PARTITION_COUNT
        + " + expected: " + expected + " actual: " + written
        + " -- " + nativeStream);
  }
}
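A hedged sketch of how this helper might be called (fs, path, data, and the expected count are illustrative; only the helper's signature comes from the snippet above):

// Hypothetical call site, modeled on the Swift partitioned-upload tests:
// write some data, then assert how many partitions the upload produced.
FSDataOutputStream out = fs.create(path);
out.write(data);
out.close();
assertPartitionsWritten("write", out, 1);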
Example 3: MockFsDataOutputStream
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
public MockFsDataOutputStream(FSDataOutputStream wrapMe, boolean closeSucceed)
    throws IOException {
  // Re-wrap the underlying stream; the null argument is the
  // FileSystem.Statistics object, which this mock does not track.
  super(wrapMe.getWrappedStream(), null);
  this.closeSucceed = closeSucceed;
}
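A hedged usage sketch for this mock (only the constructor signature comes from the snippet above; that close() throws when closeSucceed is false is an assumption inferred from the field name):

// Hypothetical test usage: wrap a real stream and force close() to fail.
FSDataOutputStream real = fs.create(new Path("/tmp/mock-target"));
MockFsDataOutputStream mock = new MockFsDataOutputStream(real, false);
try {
  mock.close(); // expected to fail because closeSucceed == false
} catch (IOException expected) {
  // the test would assert on this failure
}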
Example 4: testRoundTripAckMetric
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
/**
 * Tests that round-trip acks in a datanode write pipeline are correctly
 * measured.
 */
@Test
public void testRoundTripAckMetric() throws Exception {
  final int datanodeCount = 2;
  final int interval = 1;
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(
      datanodeCount).build();
  try {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    // Open a file and get the head of the pipeline
    Path testFile = new Path("/testRoundTripAckMetric.txt");
    FSDataOutputStream fsout = fs.create(testFile, (short) datanodeCount);
    DFSOutputStream dout = (DFSOutputStream) fsout.getWrappedStream();
    // Slow down the writes to catch the write pipeline
    dout.setChunksPerPacket(5);
    dout.setArtificialSlowdown(3000);
    fsout.write(new byte[10000]);
    DatanodeInfo[] pipeline = null;
    int count = 0;
    while (pipeline == null && count < 5) {
      pipeline = dout.getPipeline();
      System.out.println("Waiting for pipeline to be created.");
      Thread.sleep(1000);
      count++;
    }
    // Get the head node that should be receiving downstream acks
    DatanodeInfo headInfo = pipeline[0];
    DataNode headNode = null;
    for (DataNode datanode : cluster.getDataNodes()) {
      if (datanode.getDatanodeId().equals(headInfo)) {
        headNode = datanode;
        break;
      }
    }
    assertNotNull("Could not find the head of the datanode write pipeline",
        headNode);
    // Close the file and wait for the metrics to rollover
    Thread.sleep((interval + 1) * 1000);
    // Check the ack was received
    MetricsRecordBuilder dnMetrics = getMetrics(headNode.getMetrics().name());
    assertTrue("Expected non-zero number of acks",
        getLongCounter("PacketAckRoundTripTimeNanosNumOps", dnMetrics) > 0);
    assertQuantileGauges("PacketAckRoundTripTimeNanos" + interval + "s",
        dnMetrics);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 5: getSwiftNativeOutputStream
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
private static SwiftNativeOutputStream getSwiftNativeOutputStream(
    FSDataOutputStream outputStream) {
  OutputStream wrappedStream = outputStream.getWrappedStream();
  return (SwiftNativeOutputStream) wrappedStream;
}
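The unchecked cast above will throw ClassCastException if the stream did not come from the Swift filesystem. A defensive variant, as a sketch (the exception message is illustrative):

private static SwiftNativeOutputStream getSwiftNativeOutputStream(
    FSDataOutputStream outputStream) {
  OutputStream wrappedStream = outputStream.getWrappedStream();
  if (!(wrappedStream instanceof SwiftNativeOutputStream)) {
    throw new IllegalStateException(
        "not a Swift output stream: " + wrappedStream);
  }
  return (SwiftNativeOutputStream) wrappedStream;
}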
Example 6: getOutputStream
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
/**
 * Currently, we need to expose the writer's OutputStream to tests so that they can manipulate
 * the default behavior (such as setting the maxRecoveryErrorCount value; see
 * {@link TestWALReplay#testReplayEditsWrittenIntoWAL()}). This is done using reflection on the
 * underlying HDFS OutputStream.
 * NOTE: This could be removed once Hadoop1 support is removed.
 * @return null if the underlying stream is not ready.
 */
@VisibleForTesting
OutputStream getOutputStream() {
  FSDataOutputStream fsdos = this.hdfs_out;
  if (fsdos == null) return null;
  return fsdos.getWrappedStream();
}
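The javadoc above mentions tests manipulating the stream via reflection; a hedged sketch of that pattern follows (the field name maxRecoveryErrorCount is taken from the javadoc, while the declaring class and everything else here are assumptions about the test code):

// Hypothetical test-side reflection on the wrapped HDFS stream;
// 'writer' is assumed to be the WAL writer exposing getOutputStream().
OutputStream os = writer.getOutputStream();
java.lang.reflect.Field field =
    os.getClass().getDeclaredField("maxRecoveryErrorCount"); // assumed location
field.setAccessible(true);
field.setInt(os, 1); // e.g. fail fast after a single recovery error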