This article collects typical usage examples of the Java method org.apache.hadoop.io.SecureIOUtils.openForRead. If you are struggling with questions such as: what exactly is SecureIOUtils.openForRead good for? How do you call SecureIOUtils.openForRead? Where can you see SecureIOUtils.openForRead in use? Then the curated method examples here may help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.io.SecureIOUtils.
The following shows 13 code examples of the SecureIOUtils.openForRead method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
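Before diving into the examples, here is a minimal, self-contained sketch of the call pattern they all share. The file path and owner name are hypothetical; SecureIOUtils.openForRead verifies that the file is owned by the expected user (and, if given, the expected group) and throws an IOException on a mismatch. Passing null for the group, as most examples below do, skips the group check.

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;

import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SecureIOUtils;

public class OpenForReadSketch {
  public static void main(String[] args) throws IOException {
    File logFile = new File("/tmp/example.log"); // hypothetical path
    String expectedOwner = "hadoopuser";         // hypothetical owner

    // Open the file only if it is owned by expectedOwner; the null
    // argument skips the group check, as in the examples below.
    FileInputStream in = SecureIOUtils.openForRead(logFile, expectedOwner, null);
    try {
      byte[] buf = new byte[4096];
      int len = in.read(buf);
      System.out.println("read " + len + " bytes");
    } finally {
      IOUtils.closeStream(in);
    }
  }
}

Note that a few of the examples call a two-argument overload, openForRead(file, owner), which appears to come from older Hadoop releases that did not take an expected group.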
Example 1: openLogFileForRead
import org.apache.hadoop.io.SecureIOUtils; // import the package/class the method depends on
public static FileInputStream openLogFileForRead(String containerIdStr, File logFile,
Context context) throws IOException {
ContainerId containerId = ConverterUtils.toContainerId(containerIdStr);
ApplicationId applicationId = containerId.getApplicationAttemptId()
.getApplicationId();
String user = context.getApplications().get(
applicationId).getUser();
try {
return SecureIOUtils.openForRead(logFile, user, null);
} catch (IOException e) {
if (e.getMessage().contains(
"did not match expected owner '" + user
+ "'")) {
LOG.error(
"Exception reading log file " + logFile.getAbsolutePath(), e);
throw new IOException("Exception reading log file. Application submitted by '"
+ user
+ "' doesn't own requested log file : "
+ logFile.getName(), e);
} else {
throw new IOException("Exception reading log file. It might be because log "
+ "file was aggregated : " + logFile.getName(), e);
}
}
}
Example 2: openLogFileForRead
import org.apache.hadoop.io.SecureIOUtils; // import the package/class the method depends on
public static FileInputStream openLogFileForRead(String containerIdStr, File logFile,
Context context) throws IOException {
ContainerId containerId = ContainerId.fromString(containerIdStr);
ApplicationId applicationId = containerId.getApplicationAttemptId()
.getApplicationId();
String user = context.getApplications().get(
applicationId).getUser();
try {
return SecureIOUtils.openForRead(logFile, user, null);
} catch (IOException e) {
if (e.getMessage().contains(
"did not match expected owner '" + user
+ "'")) {
LOG.error(
"Exception reading log file " + logFile.getAbsolutePath(), e);
throw new IOException("Exception reading log file. Application submitted by '"
+ user
+ "' doesn't own requested log file : "
+ logFile.getName(), e);
} else {
throw new IOException("Exception reading log file. It might be because log "
+ "file was aggregated : " + logFile.getName(), e);
}
}
}
Example 3: Reader
import org.apache.hadoop.io.SecureIOUtils; // import the package/class the method depends on
/**
* Read a log file from start to end positions. The offsets may be negative,
* in which case they are relative to the end of the file. For example,
* Reader(taskid, kind, 0, -1) is the entire file and
* Reader(taskid, kind, -4197, -1) is the last 4196 bytes.
* @param taskid the id of the task to read the log file for
* @param kind the kind of log to read
* @param start the offset to read from (negative is relative to tail)
* @param end the offset to read up to (negative is relative to tail)
* @param isCleanup whether the attempt is a cleanup attempt or not
* @throws IOException
*/
public Reader(TaskAttemptID taskid, LogName kind,
long start, long end, boolean isCleanup) throws IOException {
// find the right log file
LogFileDetail fileDetail = getLogFileDetail(taskid, kind, isCleanup);
// calculate the start and stop
long size = fileDetail.length;
if (start < 0) {
start += size + 1;
}
if (end < 0) {
end += size + 1;
}
start = Math.max(0, Math.min(start, size));
end = Math.max(0, Math.min(end, size));
start += fileDetail.start;
end += fileDetail.start;
bytesRemaining = end - start;
String owner = obtainLogDirOwner(taskid);
file = SecureIOUtils.openForRead(new File(fileDetail.location, kind.toString()),
owner, null);
// skip up to start
long pos = 0;
while (pos < start) {
long result = file.skip(start - pos);
if (result < 0) {
bytesRemaining = 0;
break;
}
pos += result;
}
}
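The offset arithmetic in the constructor above is worth isolating: negative offsets are interpreted relative to the end of the file, then clamped to [0, size]. A standalone sketch of that normalization follows (semantics inferred from the constructor; the helper name is mine, not a Hadoop API):

static long[] normalizeOffsets(long start, long end, long size) {
  if (start < 0) {
    start += size + 1;  // e.g. start = -4197, size = 10000 -> 5804
  }
  if (end < 0) {
    end += size + 1;    // e.g. end = -1 -> 10000 (the end of the file)
  }
  start = Math.max(0, Math.min(start, size));
  end = Math.max(0, Math.min(end, size));
  return new long[] { start, end };  // the reader streams end - start bytes
}

With size = 10000, start = -4197 and end = -1 normalize to 5804 and 10000, i.e. the last 4196 bytes, matching the Javadoc above.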
Example 4: SpillRecord
import org.apache.hadoop.io.SecureIOUtils; // import the package/class the method depends on
public SpillRecord(Path indexFileName, JobConf job, Checksum crc,
String expectedIndexOwner) throws IOException {
final FileSystem rfs = FileSystem.getLocal(job).getRaw();
final DataInputStream in =
new DataInputStream(SecureIOUtils.openForRead(
new File(indexFileName.toUri().getPath()), expectedIndexOwner, null));
try {
final long length = rfs.getFileStatus(indexFileName).getLen();
final int partitions = (int) length / MAP_OUTPUT_INDEX_RECORD_LENGTH;
final int size = partitions * MAP_OUTPUT_INDEX_RECORD_LENGTH;
buf = ByteBuffer.allocate(size);
if (crc != null) {
crc.reset();
CheckedInputStream chk = new CheckedInputStream(in, crc);
IOUtils.readFully(chk, buf.array(), 0, size);
if (chk.getChecksum().getValue() != in.readLong()) {
throw new ChecksumException("Checksum error reading spill index: " +
indexFileName, -1);
}
} else {
IOUtils.readFully(in, buf.array(), 0, size);
}
entries = buf.asLongBuffer();
} finally {
in.close();
}
}
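For context on the buffer built above: the spill index file is a sequence of fixed-size records, one per partition, and MAP_OUTPUT_INDEX_RECORD_LENGTH is 24 bytes in the MapReduce code these examples appear to come from, i.e. three longs per record. Assuming that layout (start offset, raw length, part length), a record can be pulled out of the resulting LongBuffer as in this sketch, which mirrors what SpillRecord.getIndex does with entries:

// Sketch only; record layout (three longs per partition) is assumed.
static long[] indexRecord(java.nio.LongBuffer entries, int partition) {
  final int pos = partition * 24 / 8;  // 24-byte records, 8 bytes per long
  return new long[] {
    entries.get(pos),      // start offset of this partition's data
    entries.get(pos + 1),  // raw (uncompressed) length
    entries.get(pos + 2)   // part length as stored on disk
  };
}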
Example 5: Reader
import org.apache.hadoop.io.SecureIOUtils; // import the package/class the method depends on
/**
* Read a log file from start to end positions. The offsets may be negative,
* in which case they are relative to the end of the file. For example,
* Reader(taskid, kind, 0, -1) is the entire file and
* Reader(taskid, kind, -4197, -1) is the last 4196 bytes.
* @param taskid the id of the task to read the log file for
* @param kind the kind of log to read
* @param start the offset to read from (negative is relative to tail)
* @param end the offset to read up to (negative is relative to tail)
* @param isCleanup whether the attempt is a cleanup attempt or not
* @throws IOException
*/
public Reader(TaskAttemptID taskid, LogName kind,
long start, long end, boolean isCleanup) throws IOException {
// find the right log file
Map<LogName, LogFileDetail> allFilesDetails =
getAllLogsFileDetails(taskid, isCleanup);
LogFileDetail fileDetail = allFilesDetails.get(kind);
// calculate the start and stop
long size = fileDetail.length;
if (start < 0) {
start += size + 1;
}
if (end < 0) {
end += size + 1;
}
start = Math.max(0, Math.min(start, size));
end = Math.max(0, Math.min(end, size));
start += fileDetail.start;
end += fileDetail.start;
bytesRemaining = end - start;
String owner = obtainLogDirOwner(taskid);
file = SecureIOUtils.openForRead(new File(fileDetail.location, kind.toString()),
owner, null);
// skip up to start
long pos = 0;
while (pos < start) {
long result = file.skip(start - pos);
if (result < 0) {
bytesRemaining = 0;
break;
}
pos += result;
}
}
Example 6: SpillRecord
import org.apache.hadoop.io.SecureIOUtils; // import the package/class the method depends on
public SpillRecord(Path indexFileName, JobConf job, Checksum crc,
String expectedIndexOwner) throws IOException {
final FileSystem rfs = FileSystem.getLocal(job).getRaw();
final DataInputStream in =
new DataInputStream(SecureIOUtils.openForRead(
new File(indexFileName.toUri().getPath()), expectedIndexOwner));
try {
final long length = rfs.getFileStatus(indexFileName).getLen();
final int partitions = (int) length / MAP_OUTPUT_INDEX_RECORD_LENGTH;
final int size = partitions * MAP_OUTPUT_INDEX_RECORD_LENGTH;
buf = ByteBuffer.allocate(size);
if (crc != null) {
crc.reset();
CheckedInputStream chk = new CheckedInputStream(in, crc);
IOUtils.readFully(chk, buf.array(), 0, size);
if (chk.getChecksum().getValue() != in.readLong()) {
throw new ChecksumException("Checksum error reading spill index: " +
indexFileName, -1);
}
} else {
IOUtils.readFully(in, buf.array(), 0, size);
}
entries = buf.asLongBuffer();
} finally {
in.close();
}
}
Example 7: Reader
import org.apache.hadoop.io.SecureIOUtils; // import the package/class the method depends on
/**
* Read a log file from start to end positions. The offsets may be negative,
* in which case they are relative to the end of the file. For example,
* Reader(taskid, kind, 0, -1) is the entire file and
* Reader(taskid, kind, -4197, -1) is the last 4196 bytes.
* @param taskid the id of the task to read the log file for
* @param kind the kind of log to read
* @param start the offset to read from (negative is relative to tail)
* @param end the offset to read up to (negative is relative to tail)
* @param isCleanup whether the attempt is a cleanup attempt or not
* @throws IOException
*/
public Reader(TaskAttemptID taskid, LogName kind,
long start, long end, boolean isCleanup) throws IOException {
// find the right log file
Map<LogName, LogFileDetail> allFilesDetails =
getAllLogsFileDetails(taskid, isCleanup);
LogFileDetail fileDetail = allFilesDetails.get(kind);
// calculate the start and stop
long size = fileDetail.length;
if (start < 0) {
start += size + 1;
}
if (end < 0) {
end += size + 1;
}
start = Math.max(0, Math.min(start, size));
end = Math.max(0, Math.min(end, size));
start += fileDetail.start;
end += fileDetail.start;
bytesRemaining = end - start;
String owner = obtainLogDirOwner(taskid);
file = SecureIOUtils.openForRead(new File(fileDetail.location, kind.toString()),
owner);
// skip up to start
long pos = 0;
while (pos < start) {
long result = file.skip(start - pos);
if (result < 0) {
bytesRemaining = 0;
break;
}
pos += result;
}
}
Example 8: SpillRecord
import org.apache.hadoop.io.SecureIOUtils; // import the package/class the method depends on
public SpillRecord(Path indexFileName, JobConf job, Checksum crc,
String expectedIndexOwner)
throws IOException {
final FileSystem rfs = FileSystem.getLocal(job).getRaw();
final DataInputStream in =
new DataInputStream(SecureIOUtils.openForRead(
new File(indexFileName.toUri().getPath()), expectedIndexOwner, null));
try {
final long length = rfs.getFileStatus(indexFileName).getLen();
final int partitions = (int) length / MAP_OUTPUT_INDEX_RECORD_LENGTH;
final int size = partitions * MAP_OUTPUT_INDEX_RECORD_LENGTH;
buf = ByteBuffer.allocate(size);
if (crc != null) {
crc.reset();
CheckedInputStream chk = new CheckedInputStream(in, crc);
IOUtils.readFully(chk, buf.array(), 0, size);
if (chk.getChecksum().getValue() != in.readLong()) {
throw new ChecksumException("Checksum error reading spill index: " +
indexFileName, -1);
}
} else {
IOUtils.readFully(in, buf.array(), 0, size);
}
entries = buf.asLongBuffer();
} finally {
in.close();
}
}
Example 9: secureOpenFile
import org.apache.hadoop.io.SecureIOUtils; // import the package/class the method depends on
@VisibleForTesting
public FileInputStream secureOpenFile(File logFile) throws IOException {
return SecureIOUtils.openForRead(logFile, getUser(), null);
}
Example 10: getLogFileDetail
import org.apache.hadoop.io.SecureIOUtils; // import the package/class the method depends on
private static LogFileDetail getLogFileDetail(TaskAttemptID taskid,
LogName filter,
boolean isCleanup)
throws IOException {
File indexFile = getIndexFile(taskid, isCleanup);
BufferedReader fis = new BufferedReader(new InputStreamReader(
SecureIOUtils.openForRead(indexFile, obtainLogDirOwner(taskid), null),
Charsets.UTF_8));
//the format of the index file is
//LOG_DIR: <the dir where the task logs are really stored>
//stdout:<start-offset in the stdout file> <length>
//stderr:<start-offset in the stderr file> <length>
//syslog:<start-offset in the syslog file> <length>
LogFileDetail l = new LogFileDetail();
String str = null;
try {
str = fis.readLine();
if (str == null) { // the file doesn't have anything
throw new IOException("Index file for the log of " + taskid
+ " doesn't exist.");
}
l.location = str.substring(str.indexOf(LogFileDetail.LOCATION)
+ LogFileDetail.LOCATION.length());
// special cases are the debugout and profile.out files. They are
// guaranteed
// to be associated with each task attempt since jvm reuse is disabled
// when profiling/debugging is enabled
if (filter.equals(LogName.DEBUGOUT) || filter.equals(LogName.PROFILE)) {
l.length = new File(l.location, filter.toString()).length();
l.start = 0;
fis.close();
return l;
}
str = fis.readLine();
while (str != null) {
// look for the exact line containing the logname
if (str.contains(filter.toString())) {
str = str.substring(filter.toString().length() + 1);
String[] startAndLen = str.split(" ");
l.start = Long.parseLong(startAndLen[0]);
l.length = Long.parseLong(startAndLen[1]);
break;
}
str = fis.readLine();
}
fis.close();
fis = null;
} finally {
IOUtils.cleanup(LOG, fis);
}
return l;
}
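To make the parsing loop above concrete, here is the same substring/split logic applied to a hypothetical index line in the format described by the comments, "<logname>:<start-offset> <length>":

// Hypothetical index entry, for illustration only.
String line = "stdout:0 2048";
String filter = "stdout";
if (line.contains(filter)) {
  String rest = line.substring(filter.length() + 1); // drops "stdout:"
  String[] startAndLen = rest.split(" ");
  long start = Long.parseLong(startAndLen[0]);   // -> 0
  long length = Long.parseLong(startAndLen[1]);  // -> 2048
}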
Example 11: write
import org.apache.hadoop.io.SecureIOUtils; // import the package/class the method depends on
public void write(DataOutputStream out) throws IOException {
for (String rootLogDir : this.rootLogDirs) {
File appLogDir =
new File(rootLogDir,
ConverterUtils.toString(
this.containerId.getApplicationAttemptId().
getApplicationId())
);
File containerLogDir =
new File(appLogDir, ConverterUtils.toString(this.containerId));
if (!containerLogDir.isDirectory()) {
continue; // ContainerDir may have been deleted by the user.
}
// Write out log files in lexical order
File[] logFiles = containerLogDir.listFiles();
Arrays.sort(logFiles);
for (File logFile : logFiles) {
// Write the logFile Type
out.writeUTF(logFile.getName());
// Write the log length as UTF so that it is printable
out.writeUTF(String.valueOf(logFile.length()));
// Write the log itself
FileInputStream in = null;
try {
in = SecureIOUtils.openForRead(logFile, getUser(), null);
byte[] buf = new byte[65535];
int len = 0;
while ((len = in.read(buf)) != -1) {
out.write(buf, 0, len);
}
} catch (IOException e) {
String message = "Error aggregating log file. Log file : "
+ logFile.getAbsolutePath() + e.getMessage();
LOG.error(message, e);
out.write(message.getBytes());
} finally {
if (in != null) {
in.close();
}
}
}
}
}
Example 12: write
import org.apache.hadoop.io.SecureIOUtils; // import the package/class the method depends on
public void write(DataOutputStream out) throws IOException {
for (String rootLogDir : this.rootLogDirs) {
File appLogDir =
new File(rootLogDir,
ConverterUtils.toString(
this.containerId.getApplicationAttemptId().
getApplicationId())
);
File containerLogDir =
new File(appLogDir, ConverterUtils.toString(this.containerId));
if (!containerLogDir.isDirectory()) {
continue; // ContainerDir may have been deleted by the user.
}
// Write out log files in lexical order
File[] logFiles = containerLogDir.listFiles();
Arrays.sort(logFiles);
for (File logFile : logFiles) {
final long fileLength = logFile.length();
// Write the logFile Type
out.writeUTF(logFile.getName());
// Write the log length as UTF so that it is printable
out.writeUTF(String.valueOf(fileLength));
// Write the log itself
FileInputStream in = null;
try {
in = SecureIOUtils.openForRead(logFile, getUser(), null);
byte[] buf = new byte[65535];
int len = 0;
long bytesLeft = fileLength;
while ((len = in.read(buf)) != -1) {
// If the whole buffer still fits within the recorded file length, write it all
if (len < bytesLeft) {
out.write(buf, 0, len);
bytesLeft -= len;
}
// otherwise write only what fits within the recorded length, then exit early
else {
out.write(buf, 0, (int)bytesLeft);
break;
}
}
long newLength = logFile.length();
if (fileLength < newLength) {
LOG.warn("Aggregated logs truncated by approximately "
+ (newLength - fileLength) + " bytes.");
}
} catch (IOException e) {
String message = "Error aggregating log file. Log file : "
+ logFile.getAbsolutePath() + e.getMessage();
LOG.error(message, e);
out.write(message.getBytes());
} finally {
if (in != null) {
in.close();
}
}
}
}
}
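Example 12 differs from Example 11 in one important way: it caps the copy at the length recorded in the header, so a log file that keeps growing while it is being aggregated cannot make the stream inconsistent with the announced length. The bounded-copy idea in isolation, as a sketch (the method name and signature are mine, not Hadoop's):

static void copyBounded(java.io.InputStream in, java.io.OutputStream out,
    long announcedLength) throws java.io.IOException {
  byte[] buf = new byte[65535];
  long bytesLeft = announcedLength;
  int len;
  while (bytesLeft > 0 && (len = in.read(buf)) != -1) {
    int toWrite = (int) Math.min(len, bytesLeft);
    out.write(buf, 0, toWrite);  // never exceed the announced length
    bytesLeft -= toWrite;
  }
}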
Example 13: getLogFileDetail
import org.apache.hadoop.io.SecureIOUtils; // import the package/class the method depends on
private static LogFileDetail getLogFileDetail(TaskAttemptID taskid,
LogName filter,
boolean isCleanup)
throws IOException {
File indexFile = getIndexFile(taskid, isCleanup);
BufferedReader fis = new BufferedReader(new InputStreamReader(
SecureIOUtils.openForRead(indexFile, obtainLogDirOwner(taskid), null)));
//the format of the index file is
//LOG_DIR: <the dir where the task logs are really stored>
//stdout:<start-offset in the stdout file> <length>
//stderr:<start-offset in the stderr file> <length>
//syslog:<start-offset in the syslog file> <length>
LogFileDetail l = new LogFileDetail();
String str = fis.readLine();
if (str == null) { // the file doesn't have anything
throw new IOException("Index file for the log of " + taskid + " doesn't exist.");
}
l.location = str.substring(str.indexOf(LogFileDetail.LOCATION)
+ LogFileDetail.LOCATION.length());
//special cases are the debugout and profile.out files. They are guaranteed
//to be associated with each task attempt since jvm reuse is disabled
//when profiling/debugging is enabled
if (filter.equals(LogName.DEBUGOUT) || filter.equals(LogName.PROFILE)) {
l.length = new File(l.location, filter.toString()).length();
l.start = 0;
fis.close();
return l;
}
str = fis.readLine();
while (str != null) {
//look for the exact line containing the logname
if (str.contains(filter.toString())) {
str = str.substring(filter.toString().length()+1);
String[] startAndLen = str.split(" ");
l.start = Long.parseLong(startAndLen[0]);
l.length = Long.parseLong(startAndLen[1]);
break;
}
str = fis.readLine();
}
fis.close();
return l;
}