This page collects typical usage examples of the Java method org.apache.hadoop.hdfs.util.DataTransferThrottler.throttle. If you are unsure what DataTransferThrottler.throttle does, how to call it, or where it is used, the curated code samples below should help. You can also look at the enclosing class org.apache.hadoop.hdfs.util.DataTransferThrottler for further usage examples.
The following shows 11 code examples of DataTransferThrottler.throttle, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
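Before the numbered examples, here is a minimal, self-contained sketch of the usage pattern they all share: construct a DataTransferThrottler with a bytes-per-second limit and call throttle(n) after each chunk of data is sent, so the copy loop blocks whenever it gets ahead of the configured bandwidth. This sketch is not taken from the examples on this page; the class name ThrottledCopySketch, the file names, the 64 KB buffer, and the 1 MB/s limit are illustrative assumptions.
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

import org.apache.hadoop.hdfs.util.DataTransferThrottler;

public class ThrottledCopySketch {

  // Copy "in" to "out" while keeping the average transfer rate at or below
  // bandwidthPerSec bytes per second.
  public static void copyWithThrottle(InputStream in, OutputStream out,
      long bandwidthPerSec) throws IOException {
    DataTransferThrottler throttler = new DataTransferThrottler(bandwidthPerSec);
    byte[] buf = new byte[64 * 1024]; // illustrative buffer size
    int num;
    while ((num = in.read(buf)) > 0) {
      out.write(buf, 0, num);
      // throttle() blocks until sending these bytes stays within the limit.
      throttler.throttle(num);
    }
  }

  public static void main(String[] args) throws IOException {
    try (InputStream in = new FileInputStream("src.dat");       // hypothetical input file
         OutputStream out = new FileOutputStream("dst.dat")) {  // hypothetical output file
      copyWithThrottle(in, out, 1024 * 1024L); // cap the copy at roughly 1 MB/s
    }
  }
}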
Example 1: testThrottler
import org.apache.hadoop.hdfs.util.DataTransferThrottler; // import the package/class this method depends on
@Test
public void testThrottler() throws IOException {
  Configuration conf = new HdfsConfiguration();
  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  long bandwidthPerSec = 1024 * 1024L;
  final long TOTAL_BYTES = 6 * bandwidthPerSec;
  long bytesToSend = TOTAL_BYTES;
  long start = Time.monotonicNow();
  DataTransferThrottler throttler = new DataTransferThrottler(bandwidthPerSec);
  long totalBytes = 0L;
  long bytesSent = 1024 * 512L; // 0.5MB
  throttler.throttle(bytesSent);
  bytesToSend -= bytesSent;
  bytesSent = 1024 * 768L; // 0.75MB
  throttler.throttle(bytesSent);
  bytesToSend -= bytesSent;
  try {
    Thread.sleep(1000);
  } catch (InterruptedException ignored) {}
  throttler.throttle(bytesToSend);
  long end = Time.monotonicNow();
  assertTrue(totalBytes * 1000 / (end - start) <= bandwidthPerSec);
}
Example 2: testThrottler
import org.apache.hadoop.hdfs.util.DataTransferThrottler; // import the package/class this method depends on
@Test
public void testThrottler() throws IOException {
  Configuration conf = new HdfsConfiguration();
  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  long bandwidthPerSec = 1024 * 1024L;
  final long TOTAL_BYTES = 6 * bandwidthPerSec;
  long bytesToSend = TOTAL_BYTES;
  long start = Time.now();
  DataTransferThrottler throttler = new DataTransferThrottler(bandwidthPerSec);
  long totalBytes = 0L;
  long bytesSent = 1024 * 512L; // 0.5MB
  throttler.throttle(bytesSent);
  bytesToSend -= bytesSent;
  bytesSent = 1024 * 768L; // 0.75MB
  throttler.throttle(bytesSent);
  bytesToSend -= bytesSent;
  try {
    Thread.sleep(1000);
  } catch (InterruptedException ignored) {}
  throttler.throttle(bytesToSend);
  long end = Time.now();
  assertTrue(totalBytes * 1000 / (end - start) <= bandwidthPerSec);
}
Example 3: testThrottler
import org.apache.hadoop.hdfs.util.DataTransferThrottler; // import the package/class this method depends on
@Test
public void testThrottler() throws IOException {
  Configuration conf = new HdfsConfiguration();
  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  long bandwidthPerSec = 1024 * 1024L;
  final long TOTAL_BYTES = 6 * bandwidthPerSec;
  long bytesToSend = TOTAL_BYTES;
  long start = Time.now();
  DataTransferThrottler throttler =
      new DataTransferThrottler(bandwidthPerSec);
  long totalBytes = 0L;
  long bytesSent = 1024 * 512L; // 0.5MB
  throttler.throttle(bytesSent);
  bytesToSend -= bytesSent;
  bytesSent = 1024 * 768L; // 0.75MB
  throttler.throttle(bytesSent);
  bytesToSend -= bytesSent;
  try {
    Thread.sleep(1000);
  } catch (InterruptedException ignored) {
  }
  throttler.throttle(bytesToSend);
  long end = Time.now();
  assertTrue(totalBytes * 1000 / (end - start) <= bandwidthPerSec);
}
Example 4: testThrottler
import org.apache.hadoop.hdfs.util.DataTransferThrottler; // import the package/class this method depends on
public void testThrottler() throws IOException {
  Configuration conf = new HdfsConfiguration();
  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  long bandwidthPerSec = 1024 * 1024L;
  final long TOTAL_BYTES = 6 * bandwidthPerSec;
  long bytesToSend = TOTAL_BYTES;
  long start = Util.now();
  DataTransferThrottler throttler = new DataTransferThrottler(bandwidthPerSec);
  long totalBytes = 0L;
  long bytesSent = 1024 * 512L; // 0.5MB
  throttler.throttle(bytesSent);
  bytesToSend -= bytesSent;
  bytesSent = 1024 * 768L; // 0.75MB
  throttler.throttle(bytesSent);
  bytesToSend -= bytesSent;
  try {
    Thread.sleep(1000);
  } catch (InterruptedException ignored) {}
  throttler.throttle(bytesToSend);
  long end = Util.now();
  assertTrue(totalBytes * 1000 / (end - start) <= bandwidthPerSec);
}
Example 5: testThrottler
import org.apache.hadoop.hdfs.util.DataTransferThrottler; // import the package/class this method depends on
public void testThrottler() throws IOException {
  Configuration conf = new Configuration();
  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  long bandwidthPerSec = 1024 * 1024L;
  final long TOTAL_BYTES = 6 * bandwidthPerSec;
  long bytesToSend = TOTAL_BYTES;
  long start = Util.now();
  DataTransferThrottler throttler = new DataTransferThrottler(bandwidthPerSec);
  long totalBytes = 0L;
  long bytesSent = 1024 * 512L; // 0.5MB
  throttler.throttle(bytesSent);
  bytesToSend -= bytesSent;
  bytesSent = 1024 * 768L; // 0.75MB
  throttler.throttle(bytesSent);
  bytesToSend -= bytesSent;
  try {
    Thread.sleep(1000);
  } catch (InterruptedException ignored) {}
  throttler.throttle(bytesToSend);
  long end = Util.now();
  assertTrue(totalBytes * 1000 / (end - start) <= bandwidthPerSec);
}
Example 6: copyFileToStream
import org.apache.hadoop.hdfs.util.DataTransferThrottler; // import the package/class this method depends on
private static void copyFileToStream(OutputStream out, File localfile,
    FileInputStream infile, DataTransferThrottler throttler,
    Canceler canceler) throws IOException {
  byte buf[] = new byte[HdfsConstants.IO_FILE_BUFFER_SIZE];
  try {
    CheckpointFaultInjector.getInstance().aboutToSendFile(localfile);
    if (CheckpointFaultInjector.getInstance().shouldSendShortFile(localfile)) {
      // Test sending image shorter than localfile
      long len = localfile.length();
      buf = new byte[(int) Math.min(len / 2, HdfsConstants.IO_FILE_BUFFER_SIZE)];
      // This will read at most half of the image
      // and the rest of the image will be sent over the wire
      infile.read(buf);
    }
    int num = 1;
    while (num > 0) {
      if (canceler != null && canceler.isCancelled()) {
        throw new SaveNamespaceCancelledException(
            canceler.getCancellationReason());
      }
      num = infile.read(buf);
      if (num <= 0) {
        break;
      }
      if (CheckpointFaultInjector.getInstance().shouldCorruptAByte(localfile)) {
        // Simulate a corrupted byte on the wire
        LOG.warn("SIMULATING A CORRUPT BYTE IN IMAGE TRANSFER!");
        buf[0]++;
      }
      out.write(buf, 0, num);
      if (throttler != null) {
        throttler.throttle(num, canceler);
      }
    }
  } catch (EofException e) {
    LOG.info("Connection closed by client");
    out = null; // so we don't close in the finally
  } finally {
    if (out != null) {
      out.close();
    }
  }
}
Example 7: copyFileToStream
import org.apache.hadoop.hdfs.util.DataTransferThrottler; // import the package/class this method depends on
private static void copyFileToStream(OutputStream out, File localfile,
    FileInputStream infile, DataTransferThrottler throttler,
    Canceler canceler) throws IOException {
  byte buf[] = new byte[IO_FILE_BUFFER_SIZE];
  try {
    CheckpointFaultInjector.getInstance().aboutToSendFile(localfile);
    if (CheckpointFaultInjector.getInstance().shouldSendShortFile(localfile)) {
      // Test sending image shorter than localfile
      long len = localfile.length();
      buf = new byte[(int) Math.min(len / 2, IO_FILE_BUFFER_SIZE)];
      // This will read at most half of the image
      // and the rest of the image will be sent over the wire
      infile.read(buf);
    }
    int num = 1;
    while (num > 0) {
      if (canceler != null && canceler.isCancelled()) {
        throw new SaveNamespaceCancelledException(
            canceler.getCancellationReason());
      }
      num = infile.read(buf);
      if (num <= 0) {
        break;
      }
      if (CheckpointFaultInjector.getInstance().shouldCorruptAByte(localfile)) {
        // Simulate a corrupted byte on the wire
        LOG.warn("SIMULATING A CORRUPT BYTE IN IMAGE TRANSFER!");
        buf[0]++;
      }
      out.write(buf, 0, num);
      if (throttler != null) {
        throttler.throttle(num, canceler);
      }
    }
  } catch (EofException e) {
    LOG.info("Connection closed by client");
    out = null; // so we don't close in the finally
  } finally {
    if (out != null) {
      out.close();
    }
  }
}
Example 8: getFileServer
import org.apache.hadoop.hdfs.util.DataTransferThrottler; // import the package/class this method depends on
/**
 * A server-side method to respond to a getfile http request
 * Copies the contents of the local file into the output stream.
 */
public static void getFileServer(ServletResponse response, File localfile,
    FileInputStream infile, DataTransferThrottler throttler)
    throws IOException {
  byte buf[] = new byte[HdfsConstants.IO_FILE_BUFFER_SIZE];
  ServletOutputStream out = null;
  try {
    CheckpointFaultInjector.getInstance().aboutToSendFile(localfile);
    out = response.getOutputStream();
    if (CheckpointFaultInjector.getInstance().shouldSendShortFile(localfile)) {
      // Test sending image shorter than localfile
      long len = localfile.length();
      buf = new byte[(int) Math.min(len / 2, HdfsConstants.IO_FILE_BUFFER_SIZE)];
      // This will read at most half of the image
      // and the rest of the image will be sent over the wire
      infile.read(buf);
    }
    int num = 1;
    while (num > 0) {
      num = infile.read(buf);
      if (num <= 0) {
        break;
      }
      if (CheckpointFaultInjector.getInstance().shouldCorruptAByte(localfile)) {
        // Simulate a corrupted byte on the wire
        LOG.warn("SIMULATING A CORRUPT BYTE IN IMAGE TRANSFER!");
        buf[0]++;
      }
      out.write(buf, 0, num);
      if (throttler != null) {
        throttler.throttle(num);
      }
    }
  } finally {
    if (out != null) {
      out.close();
    }
  }
}
Example 9: copyFileToStream
import org.apache.hadoop.hdfs.util.DataTransferThrottler; // import the package/class this method depends on
/**
 * A server-side method to respond to a getfile http request
 * Copies the contents of the local file into the output stream.
 */
public static void copyFileToStream(OutputStream out, File localfile,
    FileInputStream infile, DataTransferThrottler throttler)
    throws IOException {
  byte buf[] = new byte[HdfsConstants.IO_FILE_BUFFER_SIZE];
  try {
    CheckpointFaultInjector.getInstance().aboutToSendFile(localfile);
    if (CheckpointFaultInjector.getInstance().shouldSendShortFile(localfile)) {
      // Test sending image shorter than localfile
      long len = localfile.length();
      buf = new byte[(int) Math.min(len / 2, HdfsConstants.IO_FILE_BUFFER_SIZE)];
      // This will read at most half of the image
      // and the rest of the image will be sent over the wire
      infile.read(buf);
    }
    int num = 1;
    while (num > 0) {
      num = infile.read(buf);
      if (num <= 0) {
        break;
      }
      if (CheckpointFaultInjector.getInstance().shouldCorruptAByte(localfile)) {
        // Simulate a corrupted byte on the wire
        LOG.warn("SIMULATING A CORRUPT BYTE IN IMAGE TRANSFER!");
        buf[0]++;
      }
      out.write(buf, 0, num);
      if (throttler != null) {
        throttler.throttle(num);
      }
    }
  } finally {
    if (out != null) {
      out.close();
    }
  }
}
Example 10: getFileServer
import org.apache.hadoop.hdfs.util.DataTransferThrottler; // import the package/class this method depends on
/**
 * A server-side method to respond to a getfile http request
 * Copies the contents of the local file into the output stream.
 */
static void getFileServer(OutputStream outstream, File localfile,
    DataTransferThrottler throttler) throws IOException {
  byte buf[] = new byte[BUFFER_SIZE];
  FileInputStream infile = null;
  try {
    infile = new FileInputStream(localfile);
    if (ErrorSimulator.getErrorSimulation(2)
        && localfile.getAbsolutePath().contains("secondary")) {
      // throw exception only when the secondary sends its image
      throw new IOException("If this exception is not caught by the " +
          "name-node fs image will be truncated.");
    }
    if (ErrorSimulator.getErrorSimulation(3)
        && localfile.getAbsolutePath().contains("fsimage")) {
      // Test sending image shorter than localfile
      long len = localfile.length();
      buf = new byte[(int) Math.min(len / 2, BUFFER_SIZE)];
      // This will read at most half of the image
      // and the rest of the image will be sent over the wire
      infile.read(buf);
    }
    int num = 1;
    while (num > 0) {
      num = infile.read(buf);
      if (num <= 0) {
        break;
      }
      outstream.write(buf, 0, num);
      if (throttler != null) {
        throttler.throttle(num);
      }
    }
  } finally {
    if (infile != null) {
      infile.close();
    }
  }
}
Example 11: getFileServer
import org.apache.hadoop.hdfs.util.DataTransferThrottler; // import the package/class this method depends on
/**
 * A server-side method to respond to a getfile http request
 * Copies the contents of the local file into the output stream.
 */
static void getFileServer(OutputStream outstream, File localfile,
    DataTransferThrottler throttler) throws IOException {
  byte buf[] = new byte[BUFFER_SIZE];
  FileInputStream infile = null;
  long totalReads = 0, totalSends = 0;
  try {
    infile = new FileInputStream(localfile);
    if (ErrorSimulator.getErrorSimulation(2)
        && localfile.getAbsolutePath().contains("secondary")) {
      // throw exception only when the secondary sends its image
      throw new IOException("If this exception is not caught by the " +
          "name-node fs image will be truncated.");
    }
    if (ErrorSimulator.getErrorSimulation(3)
        && localfile.getAbsolutePath().contains("fsimage")) {
      // Test sending image shorter than localfile
      long len = localfile.length();
      buf = new byte[(int) Math.min(len / 2, BUFFER_SIZE)];
      // This will read at most half of the image
      // and the rest of the image will be sent over the wire
      infile.read(buf);
    }
    int num = 1;
    while (num > 0) {
      long startRead = System.currentTimeMillis();
      num = infile.read(buf);
      if (num <= 0) {
        break;
      }
      outstream.write(buf, 0, num);
      if (throttler != null) {
        throttler.throttle(num);
      }
    }
  } finally {
    if (infile != null) {
      infile.close();
    }
  }
}