本文整理汇总了Java中azkaban.utils.FileIOUtils.LogData类的典型用法代码示例。如果您正苦于以下问题:Java LogData类的具体用法?Java LogData怎么用?Java LogData使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
LogData类属于azkaban.utils.FileIOUtils包,在下文中一共展示了LogData类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: getExecutableFlowLog
import azkaban.utils.FileIOUtils.LogData; //导入依赖的package包/类
@Override
public LogData getExecutableFlowLog(ExecutableFlow exFlow, int offset,
    int length) throws ExecutorManagerException {
  // A flow that is no longer in runningFlows has finished; its logs were
  // persisted, so read them back through the executor loader instead.
  Pair<ExecutionReference, ExecutableFlow> running =
      runningFlows.get(exFlow.getExecutionId());
  if (running == null) {
    return executorLoader.fetchLogs(exFlow.getExecutionId(), "", 0, offset,
        length);
  }
  // Still running: proxy the log request to the executor server that owns it.
  Pair<String, String> type = new Pair<String, String>("type", "flow");
  Pair<String, String> off =
      new Pair<String, String>("offset", String.valueOf(offset));
  Pair<String, String> len =
      new Pair<String, String>("length", String.valueOf(length));
  @SuppressWarnings("unchecked")
  Map<String, Object> response =
      callExecutorServer(running.getFirst(), ConnectorParams.LOG_ACTION,
          type, off, len);
  return LogData.createLogDataFromObject(response);
}
示例2: handleFetchJobLogEvent
import azkaban.utils.FileIOUtils.LogData; //导入依赖的package包/类
/**
 * Fetches a chunk of a single job's log for the given execution and writes it
 * into the response map as JSON; on any failure the exception itself is stored
 * under the error key.
 */
private void handleFetchJobLogEvent(int execid, HttpServletRequest req,
    HttpServletResponse resp, HashMap<String, Object> respMap) {
  try {
    ExecutableFlow flow = executorManager.getExecutableFlow(execid);
    String job = getParam(req, ExecutorManagerAdapter.INFO_JOB_NAME);
    int start = getIntParam(req, ExecutorManagerAdapter.INFO_OFFSET);
    int len = getIntParam(req, ExecutorManagerAdapter.INFO_LENGTH);
    int attemptNum = getIntParam(req, ExecutorManagerAdapter.INFO_ATTEMPT);
    LogData logData =
        executorManager.getExecutionJobLog(flow, job, start, len, attemptNum);
    respMap.put(ExecutorManagerAdapter.INFO_LOG,
        JSONUtils.toJSON(logData.toObject()));
  } catch (Exception e) {
    // NOTE(review): stack trace goes to stderr and the raw Exception object is
    // placed in the map — preserved as-is; consider a logger + message instead.
    e.printStackTrace();
    respMap.put(ExecutorManagerAdapter.INFO_ERROR, e);
  }
}
示例3: handleFetchFlowLogEvent
import azkaban.utils.FileIOUtils.LogData; //导入依赖的package包/类
/**
 * Fetches a chunk of the flow-level log for the given execution and writes it
 * into the response map as JSON; on any failure the exception itself is stored
 * under the error key.
 */
private void handleFetchFlowLogEvent(int execid, HttpServletRequest req,
    HttpServletResponse resp, HashMap<String, Object> respMap) {
  try {
    ExecutableFlow flow = executorManager.getExecutableFlow(execid);
    int start = getIntParam(req, ExecutorManagerAdapter.INFO_OFFSET);
    int len = getIntParam(req, ExecutorManagerAdapter.INFO_LENGTH);
    LogData logData = executorManager.getExecutableFlowLog(flow, start, len);
    respMap.put(ExecutorManagerAdapter.INFO_LOG,
        JSONUtils.toJSON(logData.toObject()));
  } catch (Exception e) {
    // NOTE(review): stack trace to stderr + raw Exception in the map —
    // preserved as-is to keep behavior identical.
    e.printStackTrace();
    respMap.put(ExecutorManagerAdapter.INFO_ERROR, e);
  }
}
示例4: fetchLogs
import azkaban.utils.FileIOUtils.LogData; //导入依赖的package包/类
@Override
public LogData fetchLogs(int execId, String name, int attempt, int startByte,
    int length) throws ExecutorManagerException {
  // The handler trims the raw rows to the byte window [startByte, startByte + length).
  FetchLogsHandler logsHandler =
      new FetchLogsHandler(startByte, startByte + length);
  QueryRunner queryRunner = createQueryRunner();
  try {
    return queryRunner.query(FetchLogsHandler.FETCH_LOGS, logsHandler,
        execId, name, attempt, startByte, startByte + length);
  } catch (SQLException e) {
    // Preserve the SQLException as the cause so the DB error is diagnosable.
    throw new ExecutorManagerException("Error fetching logs " + execId
        + " : " + name, e);
  }
}
示例5: testSmallUploadLog
import azkaban.utils.FileIOUtils.LogData; //导入依赖的package包/类
@Ignore @Test
public void testSmallUploadLog() throws ExecutorManagerException {
  File logDir = new File(UNIT_BASE_DIR + "logtest");
  File[] smalllog =
      { new File(logDir, "log1.log"), new File(logDir, "log2.log"),
        new File(logDir, "log3.log") };
  ExecutorLoader loader = createLoader();
  loader.uploadLogFile(1, "smallFiles", 0, smalllog);
  LogData data = loader.fetchLogs(1, "smallFiles", 0, 0, 50000);
  Assert.assertNotNull(data);
  // JUnit assertEquals is (message, expected, actual); the original had the
  // expected value (53) and the actual value swapped, which produces a
  // misleading failure message.
  Assert.assertEquals("Logs length is " + data.getLength(), 53,
      data.getLength());
  System.out.println(data.toString());
  LogData data2 = loader.fetchLogs(1, "smallFiles", 0, 10, 20);
  System.out.println(data2.toString());
  Assert.assertNotNull(data2);
  Assert.assertEquals("Logs length is " + data2.getLength(), 20,
      data2.getLength());
}
示例6: testSmallUploadLog
import azkaban.utils.FileIOUtils.LogData; //导入依赖的package包/类
@Test
public void testSmallUploadLog() throws ExecutorManagerException {
  File logDir = new File("unit/executions/logtest");
  File[] smalllog = {new File(logDir, "log1.log"), new File(logDir, "log2.log"), new File(logDir, "log3.log")};
  ExecutorLoader loader = createLoader();
  loader.uploadLogFile(1, "smallFiles", 0, smalllog);
  LogData data = loader.fetchLogs(1, "smallFiles", 0, 0, 50000);
  Assert.assertNotNull(data);
  // JUnit assertEquals is (message, expected, actual); the original had the
  // expected constant and the actual value swapped.
  Assert.assertEquals("Logs length is " + data.getLength(), 53, data.getLength());
  System.out.println(data.toString());
  LogData data2 = loader.fetchLogs(1, "smallFiles", 0, 10, 20);
  System.out.println(data2.toString());
  Assert.assertNotNull(data2);
  Assert.assertEquals("Logs length is " + data2.getLength(), 20, data2.getLength());
}
示例7: testRemoveExecutionLogsByTime
import azkaban.utils.FileIOUtils.LogData; //导入依赖的package包/类
@Test
public void testRemoveExecutionLogsByTime() throws ExecutorManagerException, IOException, InterruptedException {
  ExecutorLoader loader = createLoader();
  File logDir = new File("unit/executions/logtest");
  // Multiple of 255 for Henry the Eigth
  File[] largelog = {new File(logDir, "largeLog1.log"), new File(logDir, "largeLog2.log"), new File(logDir, "largeLog3.log")};
  DateTime time1 = DateTime.now();
  loader.uploadLogFile(1, "oldlog", 0, largelog);
  // Sleep so the two uploads get distinct timestamps; Thread.sleep is static,
  // so call it on the class (the old instance call needed
  // @SuppressWarnings("static-access"), which is no longer required).
  Thread.sleep(5000);
  loader.uploadLogFile(2, "newlog", 0, largelog);
  // Cutoff falls between the two uploads: only "oldlog" should be removed.
  DateTime time2 = time1.plusMillis(2500);
  int count = loader.removeExecutionLogsByTime(time2.getMillis());
  System.out.print("Removed " + count + " records");
  LogData logs = loader.fetchLogs(1, "oldlog", 0, 0, 22222);
  Assert.assertNull(logs);
  logs = loader.fetchLogs(2, "newlog", 0, 0, 22222);
  Assert.assertNotNull(logs);
}
示例8: getExecutableFlowLog
import azkaban.utils.FileIOUtils.LogData; //导入依赖的package包/类
@Override
public LogData getExecutableFlowLog(ExecutableFlow exFlow, int offset, int length) throws ExecutorManagerException {
  // Finished flows are gone from runningFlows; their logs live in the loader.
  Pair<ExecutionReference, ExecutableFlow> running = runningFlows.get(exFlow.getExecutionId());
  if (running == null) {
    return executorLoader.fetchLogs(exFlow.getExecutionId(), "", 0, offset, length);
  }
  // Running flow: ask the executor server directly for the log window.
  Pair<String, String> type = new Pair<String, String>("type", "flow");
  Pair<String, String> off = new Pair<String, String>("offset", String.valueOf(offset));
  Pair<String, String> len = new Pair<String, String>("length", String.valueOf(length));
  @SuppressWarnings("unchecked")
  Map<String, Object> response =
      callExecutorServer(running.getFirst(), ConnectorParams.LOG_ACTION, type, off, len);
  return LogData.createLogDataFromObject(response);
}
示例9: fetchLogs
import azkaban.utils.FileIOUtils.LogData; //导入依赖的package包/类
@Override
public LogData fetchLogs(
    int execId, String name, int attempt, int startByte, int length)
    throws ExecutorManagerException {
  // Handler clips the assembled log to the byte window [startByte, startByte + length).
  FetchLogsHandler logsHandler = new FetchLogsHandler(startByte, startByte + length);
  QueryRunner queryRunner = createQueryRunner();
  try {
    return queryRunner.query(
        FetchLogsHandler.FETCH_LOGS,
        logsHandler,
        execId,
        name,
        attempt,
        startByte,
        startByte + length);
  } catch (SQLException e) {
    // Wrap with context, keeping the SQLException as the cause.
    throw new ExecutorManagerException(
        "Error fetching logs " + execId + " : " + name, e);
  }
}
示例10: ajaxFetchExecFlowLogs
import azkaban.utils.FileIOUtils.LogData; //导入依赖的package包/类
/**
 * Returns a window of the flow log for an execution after a READ-permission
 * check on the owning project. Populates {@code ret} with "length", "offset"
 * and HTML-escaped "data" (all zero/empty when no log data exists), or an
 * "error" entry when the project cannot be resolved.
 *
 * @param req servlet request carrying "offset" and "length" int params
 * @param resp servlet response; character encoding is forced to UTF-8
 * @param ret JSON response map this method fills in
 * @param user user whose READ permission is checked
 * @param exFlow execution whose flow log is fetched
 * @throws ServletException wraps any ExecutorManagerException from the fetch
 */
private void ajaxFetchExecFlowLogs(HttpServletRequest req,
    HttpServletResponse resp, HashMap<String, Object> ret, User user,
    ExecutableFlow exFlow) throws ServletException {
  Project project =
      getProjectAjaxByPermission(ret, exFlow.getProjectId(), user, Type.READ);
  if (project == null) {
    ret.put("error", "Project " + exFlow.getProjectId() + " doesn't exist.");
    return;
  }
  int start = this.getIntParam(req, "offset");
  int len = this.getIntParam(req, "length");
  resp.setCharacterEncoding("utf-8");
  try {
    LogData logData = executorManager.getExecutableFlowLog(exFlow, start, len);
    if (logData == null) {
      // No data available at this offset: report an empty window.
      ret.put("length", 0);
      ret.put("offset", start);
      ret.put("data", "");
    } else {
      ret.put("length", logData.getLength());
      ret.put("offset", logData.getOffset());
      // Escape so raw log text cannot inject HTML into the viewer page.
      ret.put("data", StringEscapeUtils.escapeHtml(logData.getData()));
    }
  } catch (ExecutorManagerException e) {
    throw new ServletException(e);
  }
}
示例11: getExecutionJobLog
import azkaban.utils.FileIOUtils.LogData; //导入依赖的package包/类
@Override
public LogData getExecutionJobLog(ExecutableFlow exFlow, String jobId,
    int offset, int length, int attempt) throws ExecutorManagerException {
  // Finished executions fall out of runningFlows; read persisted logs instead.
  Pair<ExecutionReference, ExecutableFlow> running =
      runningFlows.get(exFlow.getExecutionId());
  if (running == null) {
    return executorLoader.fetchLogs(exFlow.getExecutionId(), jobId, attempt,
        offset, length);
  }
  // Running execution: request the job-level log window from the executor server.
  Pair<String, String> type = new Pair<String, String>("type", "job");
  Pair<String, String> job = new Pair<String, String>("jobId", jobId);
  Pair<String, String> off =
      new Pair<String, String>("offset", String.valueOf(offset));
  Pair<String, String> len =
      new Pair<String, String>("length", String.valueOf(length));
  Pair<String, String> att =
      new Pair<String, String>("attempt", String.valueOf(attempt));
  @SuppressWarnings("unchecked")
  Map<String, Object> response =
      callExecutorServer(running.getFirst(), ConnectorParams.LOG_ACTION,
          type, job, off, len, att);
  return LogData.createLogDataFromObject(response);
}
示例12: testLargeUploadLog
import azkaban.utils.FileIOUtils.LogData; //导入依赖的package包/类
@Ignore @Test
public void testLargeUploadLog() throws ExecutorManagerException {
  File logDir = new File(UNIT_BASE_DIR + "logtest");
  // Multiple of 255 for Henry the Eigth
  File[] largelog =
      { new File(logDir, "largeLog1.log"), new File(logDir, "largeLog2.log"),
        new File(logDir, "largeLog3.log") };
  ExecutorLoader loader = createLoader();
  loader.uploadLogFile(1, "largeFiles", 0, largelog);
  // JUnit assertEquals is (message, expected, actual); the original passed
  // the expected constants as the actual argument, which yields misleading
  // failure messages. All assertions below use the correct order.
  LogData logsResult = loader.fetchLogs(1, "largeFiles", 0, 0, 64000);
  Assert.assertNotNull(logsResult);
  Assert.assertEquals("Logs length is " + logsResult.getLength(), 64000,
      logsResult.getLength());
  LogData logsResult2 = loader.fetchLogs(1, "largeFiles", 0, 1000, 64000);
  Assert.assertNotNull(logsResult2);
  Assert.assertEquals("Logs length is " + logsResult2.getLength(), 64000,
      logsResult2.getLength());
  LogData logsResult3 = loader.fetchLogs(1, "largeFiles", 0, 330000, 400000);
  Assert.assertNotNull(logsResult3);
  Assert.assertEquals("Logs length is " + logsResult3.getLength(), 5493,
      logsResult3.getLength());
  LogData logsResult4 = loader.fetchLogs(1, "largeFiles", 0, 340000, 400000);
  Assert.assertNull(logsResult4);
  LogData logsResult5 = loader.fetchLogs(1, "largeFiles", 0, 153600, 204800);
  Assert.assertNotNull(logsResult5);
  Assert.assertEquals("Logs length is " + logsResult5.getLength(), 181893,
      logsResult5.getLength());
  LogData logsResult6 = loader.fetchLogs(1, "largeFiles", 0, 150000, 250000);
  Assert.assertNotNull(logsResult6);
  Assert.assertEquals("Logs length is " + logsResult6.getLength(), 185493,
      logsResult6.getLength());
}
示例13: testRemoveExecutionLogsByTime
import azkaban.utils.FileIOUtils.LogData; //导入依赖的package包/类
@Ignore @Test
public void testRemoveExecutionLogsByTime() throws ExecutorManagerException,
    IOException, InterruptedException {
  ExecutorLoader loader = createLoader();
  File logDir = new File(UNIT_BASE_DIR + "logtest");
  // Multiple of 255 for Henry the Eigth
  File[] largelog =
      { new File(logDir, "largeLog1.log"), new File(logDir, "largeLog2.log"),
        new File(logDir, "largeLog3.log") };
  DateTime time1 = DateTime.now();
  loader.uploadLogFile(1, "oldlog", 0, largelog);
  // Sleep so the two uploads get distinct timestamps; Thread.sleep is static,
  // so call it on the class (the old instance call needed
  // @SuppressWarnings("static-access"), which is no longer required).
  Thread.sleep(5000);
  loader.uploadLogFile(2, "newlog", 0, largelog);
  // Cutoff falls between the two uploads: only "oldlog" should be removed.
  DateTime time2 = time1.plusMillis(2500);
  int count = loader.removeExecutionLogsByTime(time2.getMillis());
  System.out.print("Removed " + count + " records");
  LogData logs = loader.fetchLogs(1, "oldlog", 0, 0, 22222);
  Assert.assertNull(logs);
  logs = loader.fetchLogs(2, "newlog", 0, 0, 22222);
  Assert.assertNotNull(logs);
}
示例14: readFlowLogs
import azkaban.utils.FileIOUtils.LogData; //导入依赖的package包/类
/**
 * Reads a byte window of the flow-level log file for a currently running
 * execution.
 *
 * @param execId execution whose flow log is read; must be in runningFlows
 * @param startByte offset into the log file to start reading from
 * @param length maximum number of bytes to read
 * @return the requested log window, decoded as UTF-8
 * @throws ExecutorManagerException if the flow is not running, the execution
 *         directory or log file is missing, or reading fails
 */
public LogData readFlowLogs(int execId, int startByte, int length)
    throws ExecutorManagerException {
  FlowRunner runner = runningFlows.get(execId);
  if (runner == null) {
    throw new ExecutorManagerException("Running flow " + execId
        + " not found.");
  }
  File dir = runner.getExecutionDir();
  if (dir != null && dir.exists()) {
    try {
      // Hold the deletion lock so cleanup cannot remove the directory while
      // we are reading from it; re-check existence once inside the lock.
      synchronized (executionDirDeletionSync) {
        if (!dir.exists()) {
          // Fixed typo in the original message: "has beend deleted".
          throw new ExecutorManagerException(
              "Execution dir file doesn't exist. Probably has been deleted");
        }
        File logFile = runner.getFlowLogFile();
        if (logFile != null && logFile.exists()) {
          return FileIOUtils.readUtf8File(logFile, startByte, length);
        } else {
          throw new ExecutorManagerException("Flow log file doesn't exist.");
        }
      }
    } catch (IOException e) {
      throw new ExecutorManagerException(e);
    }
  }
  throw new ExecutorManagerException(
      "Error reading file. Log directory doesn't exist.");
}
示例15: readJobLogs
import azkaban.utils.FileIOUtils.LogData; //导入依赖的package包/类
/**
 * Reads a byte window of a single job's log file for a currently running
 * execution.
 *
 * @param execId execution whose job log is read; must be in runningFlows
 * @param jobId id of the job within the flow
 * @param attempt attempt number of the job whose log file is selected
 * @param startByte offset into the log file to start reading from
 * @param length maximum number of bytes to read
 * @return the requested log window, decoded as UTF-8
 * @throws ExecutorManagerException if the flow is not running, the execution
 *         directory or log file is missing, or reading fails
 */
public LogData readJobLogs(int execId, String jobId, int attempt,
    int startByte, int length) throws ExecutorManagerException {
  FlowRunner runner = runningFlows.get(execId);
  if (runner == null) {
    throw new ExecutorManagerException("Running flow " + execId
        + " not found.");
  }
  File dir = runner.getExecutionDir();
  if (dir != null && dir.exists()) {
    try {
      // Hold the deletion lock so cleanup cannot remove the directory while
      // we are reading from it; re-check existence once inside the lock.
      synchronized (executionDirDeletionSync) {
        if (!dir.exists()) {
          // Fixed typo in the original message: "has beend deleted".
          throw new ExecutorManagerException(
              "Execution dir file doesn't exist. Probably has been deleted");
        }
        File logFile = runner.getJobLogFile(jobId, attempt);
        if (logFile != null && logFile.exists()) {
          return FileIOUtils.readUtf8File(logFile, startByte, length);
        } else {
          throw new ExecutorManagerException("Job log file doesn't exist.");
        }
      }
    } catch (IOException e) {
      throw new ExecutorManagerException(e);
    }
  }
  throw new ExecutorManagerException(
      "Error reading file. Log directory doesn't exist.");
}