本文整理汇总了Java中org.apache.kylin.job.exception.ExecuteException类的典型用法代码示例。如果您正苦于以下问题:Java ExecuteException类的具体用法?Java ExecuteException怎么用?Java ExecuteException使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
ExecuteException类属于org.apache.kylin.job.exception包,在下文中一共展示了ExecuteException类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: doWork
import org.apache.kylin.job.exception.ExecuteException; //导入依赖的package包/类
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    final CubeManager cubeManager = CubeManager.getInstance(context.getConfig());
    final CubeInstance cube = cubeManager.getCube(CubingExecutableUtil.getCubeName(this.getParams()));
    final CubeSegment optimizeSegment = cube.getSegmentById(CubingExecutableUtil.getSegmentId(this.getParams()));

    // The segment being optimized must have an original counterpart to copy from.
    CubeSegment sourceSegment = optimizeSegment.getCubeInstance().getOriginalSegmentToOptimize(optimizeSegment);
    Preconditions.checkNotNull(sourceSegment,
            "cannot find the original segment to be optimized by " + optimizeSegment);

    // Carry dictionaries, lookup snapshots and rowkey stats over from the source segment.
    optimizeSegment.getDictionaries().putAll(sourceSegment.getDictionaries());
    optimizeSegment.getSnapshots().putAll(sourceSegment.getSnapshots());
    optimizeSegment.getRowkeyStats().addAll(sourceSegment.getRowkeyStats());

    // Persist the enriched segment; an IO failure fails this step with the cause attached.
    try {
        CubeUpdate update = new CubeUpdate(cube);
        update.setToUpdateSegs(optimizeSegment);
        cubeManager.updateCube(update);
    } catch (IOException e) {
        logger.error("fail to merge dictionary or lookup snapshots", e);
        return ExecuteResult.createError(e);
    }
    return new ExecuteResult();
}
示例2: doWork
import org.apache.kylin.job.exception.ExecuteException; //导入依赖的package包/类
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    final CubeManager cubeManager = CubeManager.getInstance(context.getConfig());
    final CubeInstance cube = cubeManager.getCube(CubingExecutableUtil.getCubeName(this.getParams()));
    Set<Long> recommendCuboids = cube.getCuboidsRecommend();
    try {
        List<CubeSegment> readyPendingSegments = cube.getSegments(SegmentStatusEnum.READY_PENDING);
        // Collect statistics for the recommended cuboids from the freshly optimized segments.
        Map<Long, Long> cuboidStats = CuboidStatsReaderUtil.readCuboidStatsFromSegments(recommendCuboids,
                readyPendingSegments);
        if (cuboidStats == null) {
            throw new RuntimeException("Fail to get statistics info for recommended cuboids after optimization!!!");
        }
        // Promote all READY_PENDING segments at once now that stats are available.
        CubeSegment[] segmentArray = readyPendingSegments.toArray(new CubeSegment[readyPendingSegments.size()]);
        cubeManager.promoteCheckpointOptimizeSegments(cube, cuboidStats, segmentArray);
        return new ExecuteResult();
    } catch (Exception e) {
        logger.error("fail to update cube after build", e);
        return ExecuteResult.createError(e);
    }
}
示例3: doWork
import org.apache.kylin.job.exception.ExecuteException; //导入依赖的package包/类
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    // Walk the sub-tasks in order: execute the first runnable one, stop early
    // when a sub-task is already running or the job is paused, and abort the
    // whole chain if a sub-task ended in error.
    for (Executable subTask : getTasks()) {
        ExecutableState state = subTask.getStatus();
        if (state == ExecutableState.RUNNING || state == ExecutableState.STOPPED) {
            // RUNNING: a subtask is in flight, don't start another.
            // STOPPED: the job is paused.
            break;
        }
        if (state == ExecutableState.ERROR) {
            throw new IllegalStateException(
                    "invalid subtask state, subtask:" + subTask.getName() + ", state:" + subTask.getStatus());
        }
        if (subTask.isRunnable()) {
            return subTask.execute(context);
        }
    }
    // Nothing left to run (or we stopped early): report success for this pass.
    return new ExecuteResult(ExecuteResult.State.SUCCEED);
}
示例4: doWork
import org.apache.kylin.job.exception.ExecuteException; //导入依赖的package包/类
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    // Simulate ~20 seconds of work, checking for a discard request each second.
    doingWork = true;
    try {
        int secondsLeft = 20;
        while (secondsLeft-- > 0) {
            sleepOneSecond();
            if (isDiscarded()) {
                return new ExecuteResult(ExecuteResult.State.STOPPED, "stopped");
            }
        }
        return new ExecuteResult();
    } finally {
        // Always clear the flag, whether we finished, stopped, or threw.
        doingWork = false;
    }
}
示例5: doWork
import org.apache.kylin.job.exception.ExecuteException; //导入依赖的package包/类
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    try {
        config = new JobEngineConfig(context.getConfig());
        List<String> pathsToDelete = getDeletePaths();
        // Always clean the primary working file system.
        dropHdfsPathOnCluster(pathsToDelete, HadoopUtil.getWorkingFileSystem());
        // If HBase runs on a separate cluster, clean its file system too.
        String hbaseClusterFs = context.getConfig().getHBaseClusterFs();
        if (StringUtils.isNotEmpty(hbaseClusterFs)) {
            dropHdfsPathOnCluster(pathsToDelete, FileSystem.get(HBaseConnection.getCurrentHBaseConfiguration()));
        }
    } catch (IOException e) {
        logger.error("job:" + getId() + " execute finished with exception", e);
        output.append("\n").append(e.getLocalizedMessage());
        return new ExecuteResult(ExecuteResult.State.ERROR, output.toString(), e);
    }
    return new ExecuteResult(ExecuteResult.State.SUCCEED, output.toString());
}
示例6: execute
import org.apache.kylin.job.exception.ExecuteException; //导入依赖的package包/类
@Override
public final ExecuteResult execute(ExecutableContext executableContext) throws ExecuteException {
    Preconditions.checkArgument(executableContext instanceof DefaultContext);
    // Print an eye-catching banner so this step is easy to locate in the log.
    LogTitlePrinter.printTitle(this.getName());

    // Lifecycle: start-hook, do the work, then either the error-hook (with a
    // wrapped rethrow) or the finished-hook with the result.
    ExecuteResult ret;
    try {
        onExecuteStart(executableContext);
        ret = doWork(executableContext);
    } catch (Throwable e) {
        logger.error("error running Executable", e);
        onExecuteError(e, executableContext);
        throw new ExecuteException(e);
    }
    onExecuteFinished(ret, executableContext);
    return ret;
}
示例7: doWork
import org.apache.kylin.job.exception.ExecuteException; //导入依赖的package包/类
/**
 * Merges dictionaries and lookup snapshots from the segments being merged
 * into the new merged segment, then persists the cube metadata.
 */
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    KylinConfig conf = context.getConfig();
    final CubeManager mgr = CubeManager.getInstance(conf);
    final CubeInstance cube = mgr.getCube(getCubeName());
    final CubeSegment newSegment = cube.getSegmentById(getSegmentId());
    final List<CubeSegment> mergingSegments = getMergingSegments(cube);
    // Process merging segments in their natural order.
    Collections.sort(mergingSegments);
    try {
        checkLookupSnapshotsMustIncremental(mergingSegments);
        makeDictForNewSegment(conf, cube, newSegment, mergingSegments);
        makeSnapshotForNewSegment(cube, newSegment, mergingSegments);
        mgr.updateCube(cube);
        return new ExecuteResult(ExecuteResult.State.SUCCEED, "succeed");
    } catch (IOException e) {
        logger.error("fail to merge dictionary or lookup snapshots", e);
        // FIX: attach the exception as the cause (previously only the message
        // was kept, losing the stack trace in the job output).
        return new ExecuteResult(ExecuteResult.State.ERROR, e.getLocalizedMessage(), e);
    }
}
示例8: doWork
import org.apache.kylin.job.exception.ExecuteException; //导入依赖的package包/类
/**
 * Cleans up the intermediate flat Hive table created for this job and
 * returns the cleanup log as the step output.
 */
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    KylinConfig config = context.getConfig();
    // Method-local buffer: StringBuilder avoids StringBuffer's needless synchronization.
    StringBuilder output = new StringBuilder();
    try {
        output.append(cleanUpIntermediateFlatTable(config));
        // don't drop view to avoid concurrent issue
        //output.append(cleanUpHiveViewIntermediateTable(config));
    } catch (IOException e) {
        logger.error("job:" + getId() + " execute finished with exception", e);
        return ExecuteResult.createError(e);
    }
    return new ExecuteResult(ExecuteResult.State.SUCCEED, output.toString());
}
示例9: doWork
import org.apache.kylin.job.exception.ExecuteException; //导入依赖的package包/类
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    // Create the flat Hive table using the cube-specific config override;
    // the buffered step log becomes the step output either way.
    final KylinConfig cubeConfig = getCubeSpecificConfig();
    try {
        createFlatHiveTable(cubeConfig);
        return new ExecuteResult(ExecuteResult.State.SUCCEED, stepLogger.getBufferedLog());
    } catch (Exception e) {
        logger.error("job:" + getId() + " execute finished with exception", e);
        return new ExecuteResult(ExecuteResult.State.ERROR, stepLogger.getBufferedLog(), e);
    }
}
示例10: doWork
import org.apache.kylin.job.exception.ExecuteException; //导入依赖的package包/类
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    // Import the flat table via Sqoop; the buffered step log is the step output
    // for both the success and the failure path.
    final KylinConfig envConfig = KylinConfig.getInstanceFromEnv();
    try {
        sqoopFlatHiveTable(envConfig);
        return new ExecuteResult(ExecuteResult.State.SUCCEED, stepLogger.getBufferedLog());
    } catch (Exception e) {
        logger.error("job:" + getId() + " execute finished with exception", e);
        return new ExecuteResult(ExecuteResult.State.ERROR, stepLogger.getBufferedLog(), e);
    }
}
示例11: doWork
import org.apache.kylin.job.exception.ExecuteException; //导入依赖的package包/类
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    // Create the flat Hive table with the environment-level config; success
    // and failure both report the buffered step log.
    final KylinConfig envConfig = KylinConfig.getInstanceFromEnv();
    try {
        createFlatHiveTable(envConfig);
        return new ExecuteResult(ExecuteResult.State.SUCCEED, stepLogger.getBufferedLog());
    } catch (Exception e) {
        logger.error("job:" + getId() + " execute finished with exception", e);
        return new ExecuteResult(ExecuteResult.State.ERROR, stepLogger.getBufferedLog(), e);
    }
}
示例12: doWork
import org.apache.kylin.job.exception.ExecuteException; //导入依赖的package包/类
/**
 * Test task: sleeps briefly, then succeeds only when running under the first
 * scheduler's config ({@code kylinConfig1}); otherwise reports an error result.
 */
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    try {
        Thread.sleep(1000);
    } catch (InterruptedException e) {
        // FIX: restore the interrupt flag instead of swallowing it, so the
        // scheduler thread can still observe the interruption.
        Thread.currentThread().interrupt();
    }
    if (context.getConfig() == BaseTestDistributedScheduler.kylinConfig1) {
        return new ExecuteResult();
    } else {
        return new ExecuteResult(ExecuteResult.State.ERROR, "error");
    }
}
示例13: doWork
import org.apache.kylin.job.exception.ExecuteException; //导入依赖的package包/类
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    final CubeManager cubeManager = CubeManager.getInstance(context.getConfig());
    final CubeInstance cube = cubeManager.getCube(CubingExecutableUtil.getCubeName(this.getParams()));
    final CubeSegment segment = cube.getSegmentById(CubingExecutableUtil.getSegmentId(this.getParams()));

    // Input stats come from the segment this one was optimized from;
    // the cube size comes from the cubing job that built it.
    CubeSegment originalSegment = cube.getOriginalSegmentToOptimize(segment);
    CubingJob cubingJob = (CubingJob) getManager().getJob(CubingExecutableUtil.getCubingJobId(this.getParams()));

    // Stamp build metadata onto the optimized segment.
    segment.setLastBuildJobID(CubingExecutableUtil.getCubingJobId(this.getParams()));
    segment.setLastBuildTime(System.currentTimeMillis());
    segment.setSizeKB(cubingJob.findCubeSizeBytes() / 1024);
    segment.setInputRecords(originalSegment.getInputRecords());
    segment.setInputRecordsSize(originalSegment.getInputRecordsSize());

    try {
        cubeManager.promoteNewlyOptimizeSegments(cube, segment);
        return new ExecuteResult();
    } catch (IOException e) {
        logger.error("fail to update cube after build", e);
        return ExecuteResult.createError(e);
    }
}
示例14: doWork
import org.apache.kylin.job.exception.ExecuteException; //导入依赖的package包/类
// Merges dictionaries and lookup snapshots from the merging segments into the
// new merged segment, mutating only a fresh write-copy of the cube and
// persisting just the updated segment. The copy-then-update ordering matters:
// cached objects must not be modified in place.
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
final CubeManager mgr = CubeManager.getInstance(context.getConfig());
final CubeInstance cube = mgr.getCube(CubingExecutableUtil.getCubeName(this.getParams()));
final CubeSegment newSegment = cube.getSegmentById(CubingExecutableUtil.getSegmentId(this.getParams()));
final List<CubeSegment> mergingSegments = getMergingSegments(cube);
KylinConfig conf = cube.getConfig();
// Process merging segments in CubeSegment's natural order (presumably
// chronological — NOTE(review): confirm against CubeSegment.compareTo).
Collections.sort(mergingSegments);
try {
checkLookupSnapshotsMustIncremental(mergingSegments);
// work on copy instead of cached objects
CubeInstance cubeCopy = cube.latestCopyForWrite();
// Re-resolve the new segment inside the copy so mutations stay on the copy.
CubeSegment newSegCopy = cubeCopy.getSegmentById(newSegment.getUuid());
makeDictForNewSegment(conf, cubeCopy, newSegCopy, mergingSegments);
makeSnapshotForNewSegment(cubeCopy, newSegCopy, mergingSegments);
// Persist only the updated segment back through a CubeUpdate.
CubeUpdate update = new CubeUpdate(cubeCopy);
update.setToUpdateSegs(newSegCopy);
mgr.updateCube(update);
return ExecuteResult.createSucceed();
} catch (IOException e) {
logger.error("fail to merge dictionary or lookup snapshots", e);
return ExecuteResult.createError(e);
}
}
示例15: doWork
import org.apache.kylin.job.exception.ExecuteException; //导入依赖的package包/类
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    final CubeManager cubeManager = CubeManager.getInstance(context.getConfig());
    final CubeInstance cube = cubeManager.getCube(CubingExecutableUtil.getCubeName(this.getParams()));
    final CubeSegment segment = cube.getSegmentById(CubingExecutableUtil.getSegmentId(this.getParams()));
    CubingJob cubingJob = (CubingJob) getManager().getJob(CubingExecutableUtil.getCubingJobId(this.getParams()));

    // Record the statistics gathered by the cubing job on the new segment.
    segment.setLastBuildJobID(CubingExecutableUtil.getCubingJobId(this.getParams()));
    segment.setLastBuildTime(System.currentTimeMillis());
    segment.setSizeKB(cubingJob.findCubeSizeBytes() / 1024);
    segment.setInputRecords(cubingJob.findSourceRecordCount());
    segment.setInputRecordsSize(cubingJob.findSourceSizeBytes());

    try {
        // Offset-based (streaming) segments need their time range derived before promotion.
        if (segment.isOffsetCube()) {
            updateTimeRange(segment);
        }
        cubeManager.promoteNewlyBuiltSegments(cube, segment);
        return new ExecuteResult();
    } catch (IOException e) {
        logger.error("fail to update cube after build", e);
        return ExecuteResult.createError(e);
    }
}