This page collects typical usage examples of the Java class org.apache.kylin.job.execution.ExecuteResult. If you are unsure what ExecuteResult is for, how it is used, or what real code that uses it looks like, the examples selected below may help.
The ExecuteResult class belongs to the org.apache.kylin.job.execution package. Fifteen code examples of ExecuteResult are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
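
Before the examples, here is a minimal sketch of the usual pattern: a job step extends AbstractExecutable, does its work in doWork, and reports the outcome through an ExecuteResult (a plain new ExecuteResult() or ExecuteResult.createSucceed() for success, a State plus an output string to carry log text, ExecuteResult.createError(e) for failures). The class name DemoStep and its body are hypothetical and assume the Kylin 2.x package layout for the imports; only the ExecuteResult calls mirror the real examples that follow.

import org.apache.kylin.job.exception.ExecuteException;
import org.apache.kylin.job.execution.AbstractExecutable;
import org.apache.kylin.job.execution.ExecutableContext;
import org.apache.kylin.job.execution.ExecuteResult;

// Hypothetical step used only to illustrate the ExecuteResult contract.
public class DemoStep extends AbstractExecutable {

    @Override
    protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
        try {
            // ... the real work of the step would go here ...
            return new ExecuteResult(ExecuteResult.State.SUCCEED, "demo step finished");
        } catch (Exception e) {
            // Wrap the failure so the scheduler records the step as ERROR with this cause.
            return ExecuteResult.createError(e);
        }
    }
}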
Example 1: doWork

import org.apache.kylin.job.execution.ExecuteResult; // import the required package/class

@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    final CubeManager mgr = CubeManager.getInstance(context.getConfig());
    final CubeInstance cube = mgr.getCube(CubingExecutableUtil.getCubeName(this.getParams()));
    final CubeSegment optimizeSegment = cube.getSegmentById(CubingExecutableUtil.getSegmentId(this.getParams()));

    CubeSegment oldSegment = optimizeSegment.getCubeInstance().getOriginalSegmentToOptimize(optimizeSegment);
    Preconditions.checkNotNull(oldSegment,
            "cannot find the original segment to be optimized by " + optimizeSegment);

    // --- Copy dictionary
    optimizeSegment.getDictionaries().putAll(oldSegment.getDictionaries());
    optimizeSegment.getSnapshots().putAll(oldSegment.getSnapshots());
    optimizeSegment.getRowkeyStats().addAll(oldSegment.getRowkeyStats());

    try {
        CubeUpdate cubeBuilder = new CubeUpdate(cube);
        cubeBuilder.setToUpdateSegs(optimizeSegment);
        mgr.updateCube(cubeBuilder);
    } catch (IOException e) {
        logger.error("fail to merge dictionary or lookup snapshots", e);
        return ExecuteResult.createError(e);
    }
    return new ExecuteResult();
}
Example 2: doWork

import org.apache.kylin.job.execution.ExecuteResult; // import the required package/class

@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    final CubeManager cubeManager = CubeManager.getInstance(context.getConfig());
    final CubeInstance cube = cubeManager.getCube(CubingExecutableUtil.getCubeName(this.getParams()));

    Set<Long> recommendCuboids = cube.getCuboidsRecommend();
    try {
        List<CubeSegment> newSegments = cube.getSegments(SegmentStatusEnum.READY_PENDING);
        Map<Long, Long> recommendCuboidsWithStats = CuboidStatsReaderUtil
                .readCuboidStatsFromSegments(recommendCuboids, newSegments);
        if (recommendCuboidsWithStats == null) {
            throw new RuntimeException("Fail to get statistics info for recommended cuboids after optimization!!!");
        }
        cubeManager.promoteCheckpointOptimizeSegments(cube, recommendCuboidsWithStats,
                newSegments.toArray(new CubeSegment[newSegments.size()]));
        return new ExecuteResult();
    } catch (Exception e) {
        logger.error("fail to update cube after build", e);
        return ExecuteResult.createError(e);
    }
}
Example 3: updateMetrics

import org.apache.kylin.job.execution.ExecuteResult; // import the required package/class

protected void updateMetrics(ExecutableContext context, ExecuteResult result, ExecutableState state) {
    JobMetricsFacade.JobStatisticsResult jobStats = new JobMetricsFacade.JobStatisticsResult();
    jobStats.setWrapper(getSubmitter(), getProjectName(),
            CubingExecutableUtil.getCubeName(getParams()), getId(), getJobType(),
            getAlgorithm() == null ? "NULL" : getAlgorithm().toString());

    if (state == ExecutableState.SUCCEED) {
        jobStats.setJobStats(findSourceSizeBytes(), findCubeSizeBytes(), getDuration(), getMapReduceWaitTime(),
                getPerBytesTimeCost(findSourceSizeBytes(), getDuration()));
        if (CubingJobTypeEnum.getByName(getJobType()) == CubingJobTypeEnum.BUILD) {
            jobStats.setJobStepStats(
                    getTaskByName(ExecutableConstants.STEP_NAME_FACT_DISTINCT_COLUMNS).getDuration(),
                    getTaskByName(ExecutableConstants.STEP_NAME_BUILD_DICTIONARY).getDuration(),
                    getTaskByName(ExecutableConstants.STEP_NAME_BUILD_IN_MEM_CUBE).getDuration(),
                    getTaskByName(ExecutableConstants.STEP_NAME_CONVERT_CUBOID_TO_HFILE).getDuration());
        }
    } else if (state == ExecutableState.ERROR) {
        jobStats.setJobException(result.getThrowable() != null ? result.getThrowable() : new Exception());
    }
    JobMetricsFacade.updateMetrics(jobStats);
}
Example 4: doWork

import org.apache.kylin.job.execution.ExecuteResult; // import the required package/class

@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    doingWork = true;
    try {
        for (int i = 0; i < 20; i++) {
            sleepOneSecond();
            if (isDiscarded())
                return new ExecuteResult(ExecuteResult.State.STOPPED, "stopped");
        }
        return new ExecuteResult();
    } finally {
        doingWork = false;
    }
}
Example 5: doWork

import org.apache.kylin.job.execution.ExecuteResult; // import the required package/class

@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    try {
        config = new JobEngineConfig(context.getConfig());
        List<String> toDeletePaths = getDeletePaths();
        dropHdfsPathOnCluster(toDeletePaths, HadoopUtil.getWorkingFileSystem());

        if (StringUtils.isNotEmpty(context.getConfig().getHBaseClusterFs())) {
            dropHdfsPathOnCluster(toDeletePaths, FileSystem.get(HBaseConnection.getCurrentHBaseConfiguration()));
        }
    } catch (IOException e) {
        logger.error("job:" + getId() + " execute finished with exception", e);
        output.append("\n").append(e.getLocalizedMessage());
        return new ExecuteResult(ExecuteResult.State.ERROR, output.toString(), e);
    }

    return new ExecuteResult(ExecuteResult.State.SUCCEED, output.toString());
}
Example 6: doWork

import org.apache.kylin.job.execution.ExecuteResult; // import the required package/class

@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    KylinConfig conf = context.getConfig();
    final CubeManager mgr = CubeManager.getInstance(conf);
    final CubeInstance cube = mgr.getCube(getCubeName());
    final CubeSegment newSegment = cube.getSegmentById(getSegmentId());
    final List<CubeSegment> mergingSegments = getMergingSegments(cube);

    Collections.sort(mergingSegments);

    try {
        checkLookupSnapshotsMustIncremental(mergingSegments);

        makeDictForNewSegment(conf, cube, newSegment, mergingSegments);
        makeSnapshotForNewSegment(cube, newSegment, mergingSegments);

        mgr.updateCube(cube);
        return new ExecuteResult(ExecuteResult.State.SUCCEED, "succeed");
    } catch (IOException e) {
        logger.error("fail to merge dictionary or lookup snapshots", e);
        return new ExecuteResult(ExecuteResult.State.ERROR, e.getLocalizedMessage());
    }
}
Example 7: doWork

import org.apache.kylin.job.execution.ExecuteResult; // import the required package/class

@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    KylinConfig config = context.getConfig();
    StringBuffer output = new StringBuffer();
    try {
        output.append(cleanUpIntermediateFlatTable(config));
        // don't drop view to avoid concurrent issue
        //output.append(cleanUpHiveViewIntermediateTable(config));
    } catch (IOException e) {
        logger.error("job:" + getId() + " execute finished with exception", e);
        return ExecuteResult.createError(e);
    }

    return new ExecuteResult(ExecuteResult.State.SUCCEED, output.toString());
}
Example 8: doWork

import org.apache.kylin.job.execution.ExecuteResult; // import the required package/class

@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    KylinConfig config = getCubeSpecificConfig();
    try {
        createFlatHiveTable(config);
        return new ExecuteResult(ExecuteResult.State.SUCCEED, stepLogger.getBufferedLog());
    } catch (Exception e) {
        logger.error("job:" + getId() + " execute finished with exception", e);
        return new ExecuteResult(ExecuteResult.State.ERROR, stepLogger.getBufferedLog(), e);
    }
}
Example 9: doWork

import org.apache.kylin.job.execution.ExecuteResult; // import the required package/class

@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    KylinConfig config = KylinConfig.getInstanceFromEnv();
    try {
        sqoopFlatHiveTable(config);
        return new ExecuteResult(ExecuteResult.State.SUCCEED, stepLogger.getBufferedLog());
    } catch (Exception e) {
        logger.error("job:" + getId() + " execute finished with exception", e);
        return new ExecuteResult(ExecuteResult.State.ERROR, stepLogger.getBufferedLog(), e);
    }
}
Example 10: doWork

import org.apache.kylin.job.execution.ExecuteResult; // import the required package/class

@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    KylinConfig config = KylinConfig.getInstanceFromEnv();
    try {
        createFlatHiveTable(config);
        return new ExecuteResult(ExecuteResult.State.SUCCEED, stepLogger.getBufferedLog());
    } catch (Exception e) {
        logger.error("job:" + getId() + " execute finished with exception", e);
        return new ExecuteResult(ExecuteResult.State.ERROR, stepLogger.getBufferedLog(), e);
    }
}
Example 11: doWork

import org.apache.kylin.job.execution.ExecuteResult; // import the required package/class

@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    try {
        Thread.sleep(1000);
    } catch (InterruptedException e) {
        // ignore the interrupt; this test executable only simulates work
    }
    if (context.getConfig() == BaseTestDistributedScheduler.kylinConfig1) {
        return new ExecuteResult();
    } else {
        return new ExecuteResult(ExecuteResult.State.ERROR, "error");
    }
}
Example 12: doWork

import org.apache.kylin.job.execution.ExecuteResult; // import the required package/class

@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    final CubeManager cubeManager = CubeManager.getInstance(context.getConfig());
    final CubeInstance cube = cubeManager.getCube(CubingExecutableUtil.getCubeName(this.getParams()));
    final CubeSegment segment = cube.getSegmentById(CubingExecutableUtil.getSegmentId(this.getParams()));

    CubeSegment originalSegment = cube.getOriginalSegmentToOptimize(segment);
    long sourceCount = originalSegment.getInputRecords();
    long sourceSizeBytes = originalSegment.getInputRecordsSize();

    CubingJob cubingJob = (CubingJob) getManager().getJob(CubingExecutableUtil.getCubingJobId(this.getParams()));
    long cubeSizeBytes = cubingJob.findCubeSizeBytes();

    segment.setLastBuildJobID(CubingExecutableUtil.getCubingJobId(this.getParams()));
    segment.setLastBuildTime(System.currentTimeMillis());
    segment.setSizeKB(cubeSizeBytes / 1024);
    segment.setInputRecords(sourceCount);
    segment.setInputRecordsSize(sourceSizeBytes);

    try {
        cubeManager.promoteNewlyOptimizeSegments(cube, segment);
        return new ExecuteResult();
    } catch (IOException e) {
        logger.error("fail to update cube after build", e);
        return ExecuteResult.createError(e);
    }
}
Example 13: doWork

import org.apache.kylin.job.execution.ExecuteResult; // import the required package/class

@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    final CubeManager mgr = CubeManager.getInstance(context.getConfig());
    final CubeInstance cube = mgr.getCube(CubingExecutableUtil.getCubeName(this.getParams()));
    final CubeSegment newSegment = cube.getSegmentById(CubingExecutableUtil.getSegmentId(this.getParams()));
    final List<CubeSegment> mergingSegments = getMergingSegments(cube);
    KylinConfig conf = cube.getConfig();

    Collections.sort(mergingSegments);

    try {
        checkLookupSnapshotsMustIncremental(mergingSegments);

        // work on copy instead of cached objects
        CubeInstance cubeCopy = cube.latestCopyForWrite();
        CubeSegment newSegCopy = cubeCopy.getSegmentById(newSegment.getUuid());

        makeDictForNewSegment(conf, cubeCopy, newSegCopy, mergingSegments);
        makeSnapshotForNewSegment(cubeCopy, newSegCopy, mergingSegments);

        CubeUpdate update = new CubeUpdate(cubeCopy);
        update.setToUpdateSegs(newSegCopy);
        mgr.updateCube(update);
        return ExecuteResult.createSucceed();
    } catch (IOException e) {
        logger.error("fail to merge dictionary or lookup snapshots", e);
        return ExecuteResult.createError(e);
    }
}
Example 14: doWork

import org.apache.kylin.job.execution.ExecuteResult; // import the required package/class

@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    final CubeManager cubeManager = CubeManager.getInstance(context.getConfig());
    final CubeInstance cube = cubeManager.getCube(CubingExecutableUtil.getCubeName(this.getParams()));
    final CubeSegment segment = cube.getSegmentById(CubingExecutableUtil.getSegmentId(this.getParams()));

    CubingJob cubingJob = (CubingJob) getManager().getJob(CubingExecutableUtil.getCubingJobId(this.getParams()));
    long sourceCount = cubingJob.findSourceRecordCount();
    long sourceSizeBytes = cubingJob.findSourceSizeBytes();
    long cubeSizeBytes = cubingJob.findCubeSizeBytes();

    segment.setLastBuildJobID(CubingExecutableUtil.getCubingJobId(this.getParams()));
    segment.setLastBuildTime(System.currentTimeMillis());
    segment.setSizeKB(cubeSizeBytes / 1024);
    segment.setInputRecords(sourceCount);
    segment.setInputRecordsSize(sourceSizeBytes);

    try {
        if (segment.isOffsetCube()) {
            updateTimeRange(segment);
        }

        cubeManager.promoteNewlyBuiltSegments(cube, segment);
        return new ExecuteResult();
    } catch (IOException e) {
        logger.error("fail to update cube after build", e);
        return ExecuteResult.createError(e);
    }
}
Example 15: onExecuteFinished

import org.apache.kylin.job.execution.ExecuteResult; // import the required package/class

@Override
protected void onExecuteFinished(ExecuteResult result, ExecutableContext executableContext) {
    long time = 0L;
    for (AbstractExecutable task : getTasks()) {
        final ExecutableState status = task.getStatus();
        if (status != ExecutableState.SUCCEED) {
            break;
        }
        if (task instanceof MapReduceExecutable) {
            time += ((MapReduceExecutable) task).getMapReduceWaitTime();
        }
    }
    setMapReduceWaitTime(time);
    super.onExecuteFinished(result, executableContext);
}