本文整理匯總了Java中org.apache.hadoop.util.StringUtils.humanReadableInt方法的典型用法代碼示例。如果您正苦於以下問題:Java StringUtils.humanReadableInt方法的具體用法?Java StringUtils.humanReadableInt怎麼用?Java StringUtils.humanReadableInt使用的例子?那麼, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類org.apache.hadoop.util.StringUtils
的用法示例。
在下文中一共展示了StringUtils.humanReadableInt方法的3個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於系統推薦出更棒的Java代碼示例。
示例1: finalize
import org.apache.hadoop.util.StringUtils; //導入方法依賴的package包/類
@SuppressWarnings("unchecked")
void finalize(JobFactory factory, String inputPath, long dataSize,
              UserResolver userResolver, DataStatistics stats,
              Configuration conf)
throws IOException {
  // Capture the final run statistics once the simulation has ended.
  numJobsInInputTrace = factory.numJobsInTrace;
  endTime = System.currentTimeMillis();
  // "-" denotes a trace read from stdin; there is no location/signature for it.
  if (!"-".equals(inputPath)) {
    Path tracePath = new Path(inputPath);
    FileSystem traceFs = tracePath.getFileSystem(conf);
    inputTraceLocation = traceFs.makeQualified(tracePath).toString();
    inputTraceSignature = getTraceSignature(inputPath);
  } else {
    inputTraceLocation = Summarizer.NA;
    inputTraceSignature = Summarizer.NA;
  }
  jobSubmissionPolicy = Gridmix.getJobSubmissionPolicy(conf).name();
  resolver = userResolver.getClass().getName();
  // A non-positive size means the expected data size is unknown.
  expectedDataSize =
      (dataSize > 0) ? StringUtils.humanReadableInt(dataSize) : Summarizer.NA;
  dataStats = stats;
  totalRuntime = System.currentTimeMillis() - getStartTime();
}
示例2: copyData
import org.apache.hadoop.util.StringUtils; //導入方法依賴的package包/類
/**
 * Streams {@code in} to {@code out}, periodically updating the task status and
 * the {@code BYTES_COPIED} counter, and verifying on completion that exactly
 * {@code inputFileSize} bytes were transferred.
 *
 * @param context task context used for counters and status updates
 * @param inputPath source path (used only for status/log messages)
 * @param in source stream; not closed by this method
 * @param outputPath destination path (used only for status/log messages)
 * @param out destination stream; not closed by this method
 * @param inputFileSize expected number of bytes to copy
 * @throws IOException on any read/write failure, or if the number of bytes
 *         copied does not match {@code inputFileSize}
 */
private void copyData(final Context context,
    final Path inputPath, final InputStream in,
    final Path outputPath, final FSDataOutputStream out,
    final long inputFileSize)
    throws IOException {
  // "%s" -> human-readable bytes copied so far; "%.1f%%" -> percent complete.
  final String statusMessage = "copied %s/" + StringUtils.humanReadableInt(inputFileSize) +
    " (%.1f%%)";
  try {
    byte[] buffer = new byte[bufferSize];
    long totalBytesWritten = 0;
    int reportBytes = 0;          // bytes copied since the last counter/status update
    int bytesRead;
    long stime = System.currentTimeMillis();
    while ((bytesRead = in.read(buffer)) > 0) {
      out.write(buffer, 0, bytesRead);
      totalBytesWritten += bytesRead;
      reportBytes += bytesRead;
      // Batch counter/status updates to avoid hammering the framework on every read.
      if (reportBytes >= REPORT_SIZE) {
        context.getCounter(Counter.BYTES_COPIED).increment(reportBytes);
        reportCopyStatus(context, statusMessage, inputPath, outputPath,
            totalBytesWritten, inputFileSize);
        reportBytes = 0;
      }
    }
    long etime = System.currentTimeMillis();
    // Flush the remainder (< REPORT_SIZE) and publish the final status.
    context.getCounter(Counter.BYTES_COPIED).increment(reportBytes);
    reportCopyStatus(context, statusMessage, inputPath, outputPath,
        totalBytesWritten, inputFileSize);
    // Verify that the written size match
    if (totalBytesWritten != inputFileSize) {
      String msg = "number of bytes copied not matching copied=" + totalBytesWritten +
        " expected=" + inputFileSize + " for file=" + inputPath;
      throw new IOException(msg);
    }
    LOG.info("copy completed for input=" + inputPath + " output=" + outputPath);
    // Guard against a sub-millisecond copy, which would otherwise log "Infinity M/sec".
    double elapsedSec = Math.max(etime - stime, 1L) / 1000.0;
    LOG.info("size=" + totalBytesWritten +
      " (" + StringUtils.humanReadableInt(totalBytesWritten) + ")" +
      " time=" + StringUtils.formatTimeDiff(etime, stime) +
      String.format(" %.3fM/sec", (totalBytesWritten / elapsedSec)/1048576.0));
    context.getCounter(Counter.FILES_COPIED).increment(1);
  } catch (IOException e) {
    LOG.error("Error copying " + inputPath + " to " + outputPath, e);
    context.getCounter(Counter.COPY_FAILED).increment(1);
    throw e;
  }
}

/**
 * Publishes a single progress status line of the form
 * "copied X/Y (P%) from &lt;input&gt; to &lt;output&gt;".
 */
private void reportCopyStatus(final Context context, final String statusMessage,
    final Path inputPath, final Path outputPath,
    final long totalBytesWritten, final long inputFileSize) {
  context.setStatus(String.format(statusMessage,
      StringUtils.humanReadableInt(totalBytesWritten),
      (totalBytesWritten/(float)inputFileSize) * 100.0f) +
      " from " + inputPath + " to " + outputPath);
}
示例3: fileSizeToString
import org.apache.hadoop.util.StringUtils; //導入方法依賴的package包/類
/** Renders a size either as a raw byte count or in human-readable form. */
private String fileSizeToString(long size) {
  if (printSizeInBytes) {
    return Long.toString(size);
  }
  return StringUtils.humanReadableInt(size);
}